Dataset schema (field name, type, value or length range):

repo_name            string   (length 7 to 71)
file_path            string   (length 5 to 118)
context              list
import_statement     string   (length 45 to 12.5k)
token_num            int64    (641 to 99.4k)
cropped_code         string   (length 44 to 17k)
all_code             string   (length 43 to 754k)
next_line            string   (length 2 to 330)
gold_snippet_index   int64    (0 to 68)
created_at           string   (length 25 to 25)
level                string   (9 classes)
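Read against this schema, each example row below consists of a repository name, a file path, a context list of {identifier, path, snippet} records, the file's import block, a token count, the cropped code prefix, the full file, the expected next line, the index of the gold context snippet, a timestamp, and a level bucket. As a quick way to inspect rows shaped like this, here is a minimal sketch using the Hugging Face datasets library; the dataset identifier is a placeholder (the real id is not given here), and the assumption that gold_snippet_index indexes into the context list is inferred from the first example row, where index 5 points at get_packet_list and next_line calls exactly that function.

# Minimal inspection sketch. Assumptions: the Hugging Face `datasets` library is
# installed, and "org/dataset-name" is a placeholder for the real dataset id.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")   # hypothetical identifier
row = ds[0]

print(row["repo_name"], row["file_path"], row["level"])
print("tokens:", row["token_num"], "| created:", row["created_at"])

# Assumption inferred from the first example row: gold_snippet_index points into
# the context list, whose entries carry identifier/path/snippet keys.
gold = row["context"][row["gold_snippet_index"]]
print("gold snippet:", gold["identifier"], "from", gold["path"])

# The completion target is the line expected to follow cropped_code.
print("expected next line:", row["next_line"])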
repo_name: lumina-test/lumina
file_path: lumina/e2e_test/test_gbn.py
context:
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n list of dict: The list of queue pair (QP) information if successful or None otherwise.\n The list of QP information is in the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n \"\"\"\n try:\n with open(switch_msg_snapshot, 'r') as stream:\n qp_info_list = yaml.safe_load(stream)\n except:\n logging.error(\"Read switch message snapshot %s error.\" % switch_msg_snapshot)\n return None\n\n logging.info(\"Read switch message snapshot %s.\" % switch_msg_snapshot)\n return qp_info_list" }, { "identifier": "Orchestrator", "path": "lumina/orchestrator/main.py", "snippet": "class Orchestrator:\n \"\"\" Class to manage the experiment \"\"\"\n def __init__(self, config_file):\n \"\"\" Constructor for Orchestrator class\n\n Args:\n config_file (str): path to the yaml (config) file.\n The file contains configs for switch, requester, responder, traffic, etc.\n\n Returns:\n N/A\n \"\"\"\n with open(config_file, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n local_workspace = conf['local-workspace']\n result_path = conf['result-path']\n switch_conf = conf['switch']\n requester_conf = conf['requester']\n responder_conf = conf['responder']\n requester_mirror_conf = conf['requester-mirror']\n responder_mirror_conf = conf['responder-mirror']\n traffic_conf = conf['traffic']\n rewrite_udp_dst_port = conf['rewrite-udp-dst-port']\n num_repeats = conf['num-repeats']\n agg_pcap_filename = conf['aggregate-pcap-filename']\n except KeyError as e:\n print(\"Config file %s has a bad yaml format (key error: %s)\" % (config_file, e))\n sys.exit(-1)\n\n switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n\n self.local_workspace = local_workspace\n self.result_path = result_path\n self.traffic_conf = traffic_conf\n self.num_repeats = num_repeats\n self.switch = switch.Switch(switch_conf)\n self.requester = host.RDMAHost(requester_conf)\n self.responder = host.RDMAHost(responder_conf)\n self.requester_mirror = host.MirrorHost(requester_mirror_conf)\n self.responder_mirror = host.MirrorHost(responder_mirror_conf)\n self.aggregate_pcap_filename = agg_pcap_filename\n\n cmd = \"mkdir -p %s\" % self.result_path\n subprocess.call(cmd, shell = True)\n\n def rm_old_files(self):\n \"\"\" Remove result files left by previous experiments \"\"\"\n old_iter_id = 0\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path):\n cmd = \"rm -rf %s\" % (old_iter_result_path)\n subprocess.call(cmd, shell=True)\n\n old_iter_id += 1\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n def get_requester_ip_list(self):\n \"\"\" Return the list of requester IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']]\n\n def get_responder_ip_list(self):\n \"\"\" Return the 
list of responder IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']]\n\n def get_num_repeats(self):\n \"\"\" Return the number of experiment repeats \"\"\"\n return self.num_repeats\n\n def sync_and_compile(self):\n \"\"\" Syncronize and compile the code on all the hosts\n\n Returns:\n bool: True if the code is synced and compiled successfully, False otherwise\n \"\"\"\n logging.info(\"Sync and compile the code\")\n\n ## Sync and compile the switch code\n ret = self.switch.sync_and_compile(self.local_workspace,\n switch.SWITCH_PROG_DIR_NAME,\n switch.SWITCH_PROG_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the switch code\")\n return False\n\n ## Sync and compile the traffic generator code\n rdma_verb = self.traffic_conf['rdma-verb'].strip().lower()\n if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:\n logging.error(\"Invalid RDMA verb: %s\" % rdma_verb)\n return False\n\n ret = self.requester.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_client_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on requester\")\n return False\n\n ret = self.responder.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_server_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on responder\")\n return False\n\n ret = self.requester.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on requester\")\n return False\n\n ret = self.responder.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on responder\")\n return False\n\n ## Sync and compile the packet capture code\n ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on requester_mirror\")\n return False\n\n ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on responder_mirror\")\n return False\n\n return True\n\n def generate_switch_table_config(self):\n \"\"\" Generate the switch configuration, including:\n 1. Forward table\n 2. Mirror table\n 3. ARP table\n 4. 
Traffic table, including the events to inject\n\n Returns:\n bool: True if the switch configuration is generated successfully, False otherwise\n \"\"\"\n requester_nic_conf = self.requester.conf['nic']\n responder_nic_conf = self.responder.conf['nic']\n requester_mirror_nic_conf = self.requester_mirror.conf['nic']\n responder_mirror_nic_conf = self.responder_mirror.conf['nic']\n\n ## Set up forward table entries\n self.switch.conf['forward-table'] = []\n try:\n for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \\\n requester_mirror_nic_conf, responder_mirror_nic_conf],\n ['requester', 'responder', 'requester_mirror', 'responder_mirror']):\n forward_table_entry = {'dst-mac': nic_conf['mac'],\n 'eg-port': nic_conf['switch-port'],\n 'host': host_type}\n self.switch.conf['forward-table'].append(forward_table_entry)\n except:\n logging.error(\"Failed to set forward table\")\n return False\n\n ## Set up mirror table entries, use ingress_to_egress\n try:\n requester_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': requester_nic_conf['switch-port'],\n 'dst-port': requester_mirror_nic_conf['switch-port']}\n\n responder_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': responder_nic_conf['switch-port'],\n 'dst-port': responder_mirror_nic_conf['switch-port']}\n self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry]\n except:\n logging.error(\"Failed to set mirror table\")\n return False\n\n requester_mac = requester_nic_conf['mac']\n responder_mac = responder_nic_conf['mac']\n requester_ip_list = requester_nic_conf['ip-list']\n responder_ip_list = responder_nic_conf['ip-list']\n ## Set up arp table entries\n arp_entries = []\n try:\n for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list],\n [requester_mac, responder_mac]):\n for dst_ip_subnet in dst_ip_list:\n dst_ip = dst_ip_subnet.split('/')[0]\n arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac})\n self.switch.conf['arp-table'] = arp_entries\n except:\n logging.error(\"Failed to set ARP table\")\n return False\n\n ## Generate the events of each iteration for switch config\n per_iter_event_list = self.traffic_conf['data-pkt-events']\n msg_size = self.traffic_conf['message-size']\n mtu = self.traffic_conf['mtu']\n num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp']\n num_pkts_per_msg = int(math.ceil(msg_size / mtu))\n self.switch.conf['traffic'] = {}\n self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp\n self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'] = []\n\n if per_iter_event_list is None or len(per_iter_event_list) == 0:\n ## No events at all\n return True\n\n for i in range(num_msgs_per_qp):\n for per_iter_event in per_iter_event_list:\n global_event = copy.deepcopy(per_iter_event)\n\n ## This event is applied to all the packets of the message. 
We need to expand it!\n if str(global_event['psn']).lower() == 'all':\n for psn in range(num_pkts_per_msg):\n global_event['psn'] = psn + i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n else:\n global_event['psn'] += i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n\n return True\n\n def ping_mesh(self):\n \"\"\" Ping all the IP addresses between requester and responder to check the connectivity\n\n Returns:\n bool: True if all the IP addresses can be pinged successfully, False otherwise\n \"\"\"\n for requester_ip_subnet in self.requester.conf['nic']['ip-list']:\n requester_ip = requester_ip_subnet.split('/')[0]\n command = \"ping \" + requester_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.responder.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + requester_ip)\n logging.error(\"[Command return info]: %s %s\" % (', '.join(ret_val), ', '.join(err_info)))\n return False\n\n for responder_ip_subnet in self.responder.conf['nic']['ip-list']:\n responder_ip = responder_ip_subnet.split('/')[0]\n command = \"ping \" + responder_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.requester.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + responder_ip)\n logging.error(\"[Command return info]: %s %s\" % (ret_val, err_info))\n return False\n\n logging.info(\"Successfully pinged all the IP addresses between requester and responder\")\n return True\n\n def generate_switch_config_file(self):\n \"\"\" Generate the switch configuration file and copy it to the switch\n\n Returns:\n bool: True if the switch configuration file is generated and copied successfully, False otherwise\n \"\"\"\n ## Get the mac address for all the hosts\n self.requester.get_mac_address()\n self.responder.get_mac_address()\n self.requester_mirror.get_mac_address()\n self.responder_mirror.get_mac_address()\n\n ## Generate config for Match-Action table in switch\n if self.generate_switch_table_config() == False:\n logging.error(\"Failed to generate switch table configuration\")\n return False\n\n ## Dump the switch configuration into a file, and copy it to the switch\n if self.switch.dump_controller_config(self.local_workspace) == False:\n logging.error(\"Failed to dump switch config\")\n return False\n\n return True\n\n def __is_valid_traffc(self):\n \"\"\" Check if the traffic configuration is valid, including:\n 1. The tx-depth should be 1 or > 1\n 2. 
If tx-depth > 1, then we can only inject ECN marking events\n\n Returns:\n bool: True if the traffic configuration is valid, False otherwise\n \"\"\"\n try:\n data_pkt_events = self.traffic_conf['data-pkt-events']\n tx_depth = self.traffic_conf['tx-depth']\n\n if tx_depth == 1:\n return True\n elif tx_depth <= 0:\n return False\n\n for event in data_pkt_events:\n if event['type'] != 'ecn':\n logging.error(\"Cannot inject %s event when tx depth = %d\" % (event['type'], tx_depth))\n return False\n except:\n logging.error(\"Failed to parse traffic configuration\")\n return False\n\n return True\n\n def run_experiment(self):\n \"\"\" Run the experiment\n\n Returns:\n bool: True if the experiment is completed successfully, False otherwise\n \"\"\"\n\n ## Check if traffic configuration is valid\n if self.__is_valid_traffc() == False:\n logging.error(\"Invalid traffic configuration\")\n return False\n\n ## Run switch program\n if self.switch.run_switch() == False:\n logging.error(\"Failed to run switch\")\n return False\n\n ## Sleep for 1 second to make sure control plane is listenning (for client message)\n time.sleep(1)\n\n ## Configure the servers\n if self.requester.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA requester\")\n return False\n\n if self.responder.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA responder\")\n return False\n\n if self.requester_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on responder mirror\")\n return False\n\n ## Check the connectivity through pingmesh (try 5 rounds)\n num_tries = 0\n pingmesh_ret = False\n\n while num_tries < 5:\n pingmesh_ret = self.ping_mesh()\n if pingmesh_ret == True:\n break\n num_tries += 1\n time.sleep(1)\n\n if pingmesh_ret == False:\n logging.error(\"Failed to ping all the IP addresses between requester and responder\")\n return False\n\n ## Launch packet capture for both side\n ## Prerequisite: config hugepage and igb_uio if needed\n if self.requester_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on responder mirror\")\n return False\n\n time.sleep(3)\n\n ## Dump the counters before running\n if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester before running\")\n return False\n\n if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder before running\")\n return False\n\n ## Launch RDMA server first\n run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf)\n if run_server_ret == False:\n logging.error(\"Failed to run RDMA server\")\n return False\n\n time.sleep(2)\n\n ## Launch RDMA client\n try:\n destination_ip_subnet = self.responder.conf['nic']['ip-list'][0]\n destination_ip = destination_ip_subnet.split('/')[0]\n except:\n logging.error(\"Failed to get destination IP\")\n return False\n\n run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf,\n destination_ip=destination_ip,\n controller_ip=self.switch.conf['control-ip'],\n controller_listen_port=self.switch.conf['listen-port'])\n if run_client_ret == 
False:\n logging.error(\"Failed to run RDMA client\")\n return False\n\n if self.switch.dump_results() == False:\n logging.error(\"Failed to dump results from switch\")\n return False\n\n if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester after running\")\n return False\n\n if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder after running\")\n return False\n\n logging.info(\"Experiment completed successfully\")\n return True\n\n def clean_up(self):\n \"\"\" Clean up the environment after the experiment\n\n Returns:\n bool: True if the clean up is completed successfully, False otherwise\n \"\"\"\n logging.info(\"Start cleaning up the environment\")\n\n if self.switch.clean_up() == False:\n logging.error(\"Failed to clean up switch\")\n return False\n\n if self.requester.clean_up() == False:\n logging.error(\"Failed to clean up requester\")\n return False\n\n if self.responder.clean_up() == False:\n logging.error(\"Failed to clean up responder\")\n return False\n\n if self.requester_mirror.clean_up() == False:\n logging.error(\"Failed to clean up requester mirror\")\n return False\n\n if self.responder_mirror.clean_up() == False:\n logging.error(\"Failed to clean up responder mirror\")\n return False\n\n return True\n\n def fetch_results(self, iter_id=0):\n \"\"\" Fetch the results of iteration 'iter_id', including:\n 1. Switch table entries and counters\n 2. Packet trace (pcap file)\n 3. Configs and end-to-end results from RDMA hosts\n\n Args:\n iter_id (int, optional): iteration ID, defaults to 0\n\n Returns:\n bool: True if the result collection is completed successfully, False otherwise\n \"\"\"\n ## Make the results dir if it does not exist\n iter_result_path = os.path.join(self.result_path, str(iter_id))\n cmd = \"mkdir -p %s\" % iter_result_path\n try:\n subprocess.call(cmd, shell=True)\n except:\n logging.error(\"Failed to create result directory %s\" % iter_result_path)\n return False\n\n if self.switch.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from switch\")\n return False\n\n if self.requester_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester mirror\")\n return False\n\n if self.responder_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder mirror\")\n return False\n\n if self.requester.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester\")\n return False\n\n if self.responder.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder\")\n return False\n\n logging.info(\"Finished fetching results for iteration %d\" % iter_id)\n return True\n\n def merge_traces(self, iter_id=0):\n iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)\n src_pcap_file_list = [os.path.join(iter_pcap_dir_path,\n self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),\n os.path.join(iter_pcap_dir_path,\n self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]\n target_pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = pcap_process.merge_pcaps(src_pcap_file_list)\n if packet_list is None:\n logging.error(\"Failed to merge pcap files for iteration %d\" % iter_id)\n return False\n\n if 
pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:\n logging.error(\"Failed to dump packets to pcap file %s\" % target_pcap_path)\n return False\n\n logging.info(\"Successfully merged pcap files for iteration %d\" % iter_id)\n\n def check_integrity(self, iter_id=0):\n ## Check if the collected packet trace passes integrity check\n pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = get_packet_list(pcap_path)\n packet_list.sort(key=lambda x:x.get_switch_seqnum())\n logging.info(\"Packet trace sorted by switch sequence number.\")\n\n switch_state_snapshot = os.path.join(self.result_path,\n str(iter_id),\n switch.SWITCH_RESULT_DIR,\n switch.SWITCH_STATE_SNAPSHOT)\n port_map = {'requester': self.requester.conf['nic']['switch-port'],\n 'responder': self.responder.conf['nic']['switch-port'],\n 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],\n 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}\n switch_counter = SwitchCounter(switch_state_snapshot, port_map)\n\n integrity_checker = IntegrityCheck(packet_list=packet_list,\n switch_counter=switch_counter,\n requester_ip_list=self.get_requester_ip_list(),\n responder_ip_list=self.get_responder_ip_list())\n\n if integrity_checker.check() == True:\n logging.info(\"Integrity check passed\")\n return True\n else:\n logging.info(\"Integrity check failed\")\n return False" }, { "identifier": "SwitchCounter", "path": "lumina/analyzer/counter/switch_counter.py", "snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = 
egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter" }, { "identifier": "MLNXHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class MLNXHostCounter(HostCounter):\n \"\"\" Class to parse MLNX host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_port_rcv_packets(self):\n \"\"\" Return the number of received packets \"\"\"\n return self._counter['port-counters']['port_rcv_packets']\n\n def get_port_xmit_packets(self):\n \"\"\" Return the number of transmitted packets \"\"\"\n return self._counter['port-counters']['port_xmit_packets']\n\n def get_num_packet_seq_err(self):\n \"\"\" Return the number of received NAK sequence error packets \"\"\"\n return self._counter['hw-counters']['packet_seq_err']\n\n def get_num_out_of_sequence(self):\n \"\"\" Return the number of out-of-sequence packets received \"\"\"\n return self._counter['hw-counters']['out_of_sequence']\n\n def get_num_dup_requests(self):\n \"\"\" Return the number of duplicate requests \"\"\"\n return self._counter['hw-counters']['duplicate_request']\n\n def implied_nak_seq_err(self):\n \"\"\" Return the number of READ requests implying sequence errors \"\"\"\n return self._counter['hw-counters']['implied_nak_seq_err']\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['np_cnp_sent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['np_ecn_marked_roce_packets']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['rp_cnp_handled']\n\n def get_num_icrc_errors(self):\n \"\"\" Return the number of RoCE packets with ICRC errors received \"\"\"\n return self._counter['hw-counters']['rx_icrc_encapsulated']\n\n def get_num_timeout_err(self):\n \"\"\" Return the number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side \"\"\"\n return self._counter['hw-counters']['local_ack_timeout_err']\n\n def get_num_discards_dict_tx(self):\n \"\"\" Return the number of TX discarded packets (dict)\"\"\"\n discards_dict_tx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'tx' in x:\n discards_dict_tx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_tx\n\n def get_num_discards_dict_rx(self):\n \"\"\" Return the number of RX discarded packets (dict) \"\"\"\n discards_dict_rx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'rx' in x:\n discards_dict_rx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_rx" }, { "identifier": "IntelHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class IntelHostCounter(HostCounter):\n \"\"\" Class to parse Intel host counter files \"\"\"\n def __init__(self, 
counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['cnpSent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['RxECNMrkd']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['cnpHandled']\n\n def get_num_discards_dict(self):\n \"\"\" Return the number of discarded packets (dict) \"\"\"\n discards_dict= {}\n for x in self._counter['hw-counters'].keys():\n if 'discard' in x:\n discards_dict[x] = self._counter['hw-counters'][x]\n return discards_dict" }, { "identifier": "get_packet_list", "path": "lumina/analyzer/pcap_processor/pcap_process.py", "snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. 
Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list" }, { "identifier": "LatencyMeasure", "path": "lumina/analyzer/measurer/latency_measure.py", "snippet": "class LatencyMeasure:\n \"\"\" Class to measure the latency between packets for some events,\n e.g., NACK latency, Retransmission latency, CNP latency\n\n Attributes:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb\n \"\"\"\n def __init__(self, packet_list, qp_info_list, is_read=False):\n \"\"\" Constructor\n\n Args:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb (default: False)\n\n Returns:\n N/A\n \"\"\"\n self.packet_list = packet_list\n self.qp_info_list = qp_info_list\n self.is_read = is_read\n\n def get_peer_qp_info(self, dest_qpn, dest_ip):\n \"\"\" Get the info of the peer QP (qpn, ip) of a given qp (qpn, ip)\n\n Args:\n dest_qpn (int): destination QP number\n dest_ip (str): destination IP\n\n Returns:\n int: peer QP number (None if not found)\n str: peer IP (None if not found)\n \"\"\"\n for qp_info in self.qp_info_list:\n if qp_info['qpn_snd'] == dest_qpn and qp_info['ip_snd'] == dest_ip:\n return qp_info['qpn_rcv'], qp_info['ip_rcv']\n elif qp_info['qpn_rcv'] == dest_qpn and qp_info['ip_rcv'] == dest_ip:\n return qp_info['qpn_snd'], qp_info['ip_snd']\n\n return None, None\n\n def get_bit_error_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with bit error flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with bit error flag\n \"\"\"\n error_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_bit_error() == False:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n error_pkt_list.append(packet)\n\n return error_pkt_list\n\n def get_dropped_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with drop flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with drop flag\n \"\"\"\n dropped_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_dropped() == False:\n continue\n\n if relative_dest_qpn 
== None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n dropped_pkt_list.append(packet)\n\n return dropped_pkt_list\n\n def get_ecn_pkts(self):\n \"\"\" Get the packets marked with ECN\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with ECN\n \"\"\"\n ecn_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_ecn():\n ecn_pkt_list.append(packet)\n\n return ecn_pkt_list\n\n def get_cnp_pkts(self):\n \"\"\" Get the congestion notification packets\n\n Returns:\n list of RRoCEPacket objects: the list of congestion notification packets\n \"\"\"\n cnp_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_cnp():\n cnp_pkt_list.append(packet)\n\n return cnp_pkt_list\n\n def get_undelivered_pkts(self, relative_dest_qpn = None):\n \"\"\" Get the undelivered packets (dropped or marked with bit error)\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of undelivered packets\n \"\"\"\n undelivered_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_delivered() == True:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n undelivered_pkt_list.append(packet)\n\n return undelivered_pkt_list\n\n def get_nack(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return the NACK packet that triggers its retransmission.\n If there's no NACK packet found for the undelivered packet, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet that triggers retransmission\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the NACK packet that triggers the retransmission of the undelivered packet\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() == undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_qp_first_nack_before_retrans(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the first NACK packet on its QP between it and its retransmission.\n If there's no NACK packet found before the retransmission, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet\n\n Args:\n undelivered_pkt (RRoCEPacket 
object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the first NACK packet on the QP between the undelivered packet and its retransmission\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() <= undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return packet\n\n return None\n\n def get_qp_next_delivered_pkt(self, current_pkt):\n \"\"\" For a packet, return the next delivered packet on the same QP.\n\n Args:\n current_pkt (RRoCEPacket object): the current packet\n\n Returns:\n RRoCEPacket object: the next delivered packet on the same QP (None if not found)\n \"\"\"\n switch_seqnum = current_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_qp_roce_data_pkt(packet, current_pkt) and \\\n packet.get_switch_seqnum() > switch_seqnum and \\\n packet.is_delivered():\n return packet\n\n return None\n\n def get_retransmit_pkt(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return its retransmission packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the retransmission packet of the undelivered packet (None if not found)\n \"\"\"\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_latency_between_pkts(self, packet_alpha, packet_beta):\n \"\"\" Return the time of packet_beta - time of packet_alpha in seconds\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n float: the time difference between two packets in seconds\n \"\"\"\n return packet_beta.get_switch_timestamp() - packet_alpha.get_switch_timestamp()\n\n def is_same_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are the same RoCE data packet (same src ip, dst ip, dest qp, and psn)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are the same RoCE data packet, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp() and \\\n packet_alpha.get_roce_pkt_seq() == packet_beta.get_roce_pkt_seq()\n\n def 
is_same_qp_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are RoCE data packets on the same QP (same src ip, dst ip, and dest qp)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are RoCE data packets on the same QP, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp()\n\n def get_qp_next_delivered_pkt_latency(self, pkt):\n \"\"\" Get the latency between 'pkt' and next 'delivered' packet on the same QP\n\n Args:\n pkt (RRoCEPacket object): the packet\n\n Returns:\n float: the latency between 'pkt' and next 'delivered' packet on the same QP\n (None if not found)\n \"\"\"\n\n next_pkt = self.get_qp_next_delivered_pkt(pkt)\n if next_pkt is None:\n return None\n\n return self.get_latency_between_pkts(pkt, next_pkt)\n\n def get_nack_gen_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK generation latency, i.e., the duration from the detection of\n the undelivered packet to the generation of the NACK packet that triggers its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK generation latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n # NACK should be triggered by the next delivered packet on the same QP\n next_delivered_pkt = self.get_qp_next_delivered_pkt(undelivered_pkt)\n if self.is_same_roce_data_pkt(next_delivered_pkt, undelivered_pkt):\n # We should never reach here\n return None\n\n nack_gen_latency = self.get_latency_between_pkts(next_delivered_pkt, nack_pkt)\n return nack_gen_latency\n\n def get_nack_resp_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK response latency, i.e., the duration from the generation of\n the NACK packet to the retransmission of this undelivered packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK response latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n nack_resp_latency = self.get_latency_between_pkts(nack_pkt, retransmit_pkt)\n return nack_resp_latency\n\n def get_retransmit_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the retransmission latency, i.e., the duration from the packet\n to its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the retransmission latency for the undelivered packet (None if not found)\n \"\"\"\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n return retransmit_latency\n\n def get_nack_gen_latency_list(self, relative_dest_qpn=None):\n \"\"\" Return a list of NACK generation latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of NACK generation latency for all 
undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n nack_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n nack_latency_list.append(None)\n else:\n nack_latency = self.get_latency_between_pkts(undelivered_pkt, nack_pkt)\n nack_latency_list.append(nack_latency)\n\n return nack_latency_list\n\n def get_retransmit_latency_list(self, relative_dest_qpn):\n \"\"\" Return a list of retransmission latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of retransmission latency for all undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n retransmit_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n retransmit_latency_list.append(None)\n else:\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n retransmit_latency_list.append(retransmit_latency)\n\n return retransmit_latency_list" }, { "identifier": "config_stream_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)" }, { "identifier": "config_file_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)" }, { "identifier": "TRIGGER_OOS", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_OOS = 1" }, { "identifier": "TRIGGER_TIMEOUT", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_TIMEOUT = 2" } ]
import_statement:
import argparse, os, math, glob, logging, time
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.gbn_check as gbn_check
import lumina.analyzer.checker.read_gbn_check as read_gbn_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.main import get_qp_info_list
from lumina.orchestrator.main import Orchestrator
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure
from lumina.utils.config_loggers import config_stream_handler, config_file_handler
from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT

token_num: 15,247
trigger = nack.get_trigger() if trigger == TRIGGER_OOS: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: # For other verbs, we can only find a NACK in case of out of sequence arriving packets if latency_measurement.get_nack(pkt) != None: # Out of sequence/NACK triggered retransmission next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6)) elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) def verify_results(orchestrator): """ Verify the experiment results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ result_dir = orchestrator.result_path num_repeats = orchestrator.num_repeats mtu = orchestrator.traffic_conf['mtu'] msg_size = orchestrator.traffic_conf['message-size'] num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp'] aggregate_pcap_filename = orchestrator.aggregate_pcap_filename port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'], 'responder': orchestrator.responder.conf['nic']['switch-port'], 'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']} requester_ip_list = orchestrator.get_requester_ip_list() responder_ip_list = orchestrator.get_responder_ip_list() for iter in range(num_repeats): iter = str(iter) result_logger = logging.getLogger('Analysis iter %s' % (iter)) result_logger.handlers.clear() config_file_handler(logger=result_logger, log_file=os.path.join(result_dir, iter, RESULT_FILENAME), no_format=True) result_logger.info("=" * 100) result_logger.info("Iteration %s" % iter) switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT) switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename) requester_counter_start = os.path.join(result_dir, iter, 
host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME) requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME) responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME) responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME) switch_counter = SwitchCounter(switch_state_snapshot, port_map) if orchestrator.requester.is_mlnx_nic(): requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish) elif orchestrator.requester.is_intel_nic(): requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish) else: logging.error("Unkown NIC Vendor for rdma requester.") requester_counter = None if orchestrator.responder.is_mlnx_nic(): responder_counter = MLNXHostCounter(responder_counter_start, responder_counter_finish) elif orchestrator.responder.is_intel_nic(): responder_counter = IntelHostCounter(responder_counter_start, responder_counter_finish) else: logging.error("Unkown NIC Vendor for rdma responder.") responder_counter = None qp_info_list = get_qp_info_list(switch_msg_snapshot)
## All logs will be logged into file LOG_FILENAME LOG_FILENAME = "test_gbn.log" ## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME RESULT_FILENAME = "result.log" ## Max # of retries for each experiment iteration MAX_NB_EXP_RETRIES = 3 def setup_root_logger(orchestrator): """ Setup the root logger for the test Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ root_logger = logging.getLogger() root_logger.handlers.clear() config_stream_handler(root_logger) config_file_handler(logger=root_logger, log_file=os.path.join(orchestrator.result_path, LOG_FILENAME), no_format=False) def run_traffic(orchestrator): """ Run the traffic and collect the results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: bool: True if the experiment is successful, False otherwise """ orchestrator.rm_old_files() if orchestrator.sync_and_compile() == False: logging.error("Failed to sync and compile the code") sys.exit(-1) logging.info("Sync and compile completed") if orchestrator.generate_switch_config_file() == False: logging.error("Failed to generate switch configuration file") sys.exit(-1) num_repeats = orchestrator.get_num_repeats() for i in range(num_repeats): logging.info("=" * 100) nb_retry = 0 iter_result = False while nb_retry < MAX_NB_EXP_RETRIES: if orchestrator.run_experiment() == False: logging.error("Iteration %d: Failed to complete experiment" % i) logging.error("Iteration %d: Rerun experiment (retry: %d)" % i, nb_retry) nb_retry += 1 orchestrator.clean_up() time.sleep(5) continue logging.info("Iteration %d: Completed experiment" % i) try: orchestrator.clean_up() orchestrator.fetch_results(i) logging.info("Iteration %d: Fetch experiment results" % i) orchestrator.merge_traces(i) logging.info("Iteration %d: Merge the pcap files" % i) except: logging.error("Iteration %d: Result collection failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue if orchestrator.check_integrity(i) == False: logging.error("Iteration %d: Integrity check failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue iter_result = True break if iter_result is False: logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry)) return False return True def analyze_retrans_latency(pkt, latency_measurement, is_read, logger): """ Analyze the retransmission latency breakdown for an undelivered packet Args: pkt (Packet object): The undelivered packet latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown is_read (bool): If we use RDMA READ in this experiment logger (logging.Logger): A logger object Returns: N/A """ # All the undelivered packets should be retransmitted in our test cases if latency_measurement.get_retransmit_pkt(pkt) == None: logger.error("\t\t No retransmit packet found for this packet") logger.error("\t\t It is possible that this undelivered packet is a redundant transmission") return retrans_latency = latency_measurement.get_retransmit_latency(pkt) if is_read == True: # For RDMA READ, we should always find a NACK READ request that triggers retransmission nack = latency_measurement.get_nack(pkt) if nack is not None: trigger = nack.get_trigger() if trigger == TRIGGER_OOS: next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) 
                nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt)
                nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt)
                logger.info("\t\t Out of sequence (OOS) triggered retransmission")
                logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
                logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6))
                logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6))
                logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6))
            elif trigger == TRIGGER_TIMEOUT:
                nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt)
                logger.info("\t\t Timeout triggered retransmission")
                logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
                logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6))
            else:
                logger.error("\t\t NACK READ request should be triggered by either OOS or timeout")
        else:
            nack = latency_measurement.get_qp_first_nack_before_retrans(pkt)
            if nack is None:
                logger.error("\t\t Cannot find the NACK READ request to recover this lost packet")
                return
            trigger = nack.get_trigger()
            if trigger == TRIGGER_OOS:
                logger.info("\t\t Out of sequence (OOS) triggered retransmission")
                logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\
                            (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq()))
                logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
            elif trigger == TRIGGER_TIMEOUT:
                logger.info("\t\t Timeout triggered retransmission")
                logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\
                            (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq()))
                logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
            else:
                logger.error("\t\t NACK READ request should be triggered by either OOS or timeout")
    else:
        # For other verbs, we can only find a NACK in case of out of sequence arriving packets
        if latency_measurement.get_nack(pkt) != None:
            # Out of sequence/NACK triggered retransmission
            next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt)
            nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt)
            nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt)
            logger.info("\t\t Out of sequence (OOS) triggered retransmission")
            logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
            logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6))
            logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6))
            logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6))
        elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None:
            nack = latency_measurement.get_qp_first_nack_before_retrans(pkt)
            logger.info("\t\t Out of sequence (OOS) triggered retransmission")
            logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)" %\
                        (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq()))
            logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))
        else:
            logger.info("\t\t Timeout triggered retransmission")
            logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6))

def verify_results(orchestrator):
    """ Verify the experiment results

    Args:
        orchestrator (Orchestrator object): Orchestrator object that contains all the configurations

    Returns:
        N/A
    """
    result_dir = orchestrator.result_path
    num_repeats = orchestrator.num_repeats
    mtu = orchestrator.traffic_conf['mtu']
    msg_size = orchestrator.traffic_conf['message-size']
    num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp']
    aggregate_pcap_filename = orchestrator.aggregate_pcap_filename

    port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'],
                'responder': orchestrator.responder.conf['nic']['switch-port'],
                'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'],
                'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']}

    requester_ip_list = orchestrator.get_requester_ip_list()
    responder_ip_list = orchestrator.get_responder_ip_list()

    for iter in range(num_repeats):
        iter = str(iter)
        result_logger = logging.getLogger('Analysis iter %s' % (iter))
        result_logger.handlers.clear()
        config_file_handler(logger=result_logger,
                            log_file=os.path.join(result_dir, iter, RESULT_FILENAME),
                            no_format=True)
        result_logger.info("=" * 100)
        result_logger.info("Iteration %s" % iter)

        switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT)
        switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT)
        pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename)
        requester_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME)
        requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME)
        responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME)
        responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME)

        switch_counter = SwitchCounter(switch_state_snapshot, port_map)

        if orchestrator.requester.is_mlnx_nic():
            requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish)
        elif orchestrator.requester.is_intel_nic():
            requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish)
        else:
            logging.error("Unknown NIC vendor for RDMA requester.")
            requester_counter = None

        if orchestrator.responder.is_mlnx_nic():
            responder_counter = MLNXHostCounter(responder_counter_start, responder_counter_finish)
        elif orchestrator.responder.is_intel_nic():
            responder_counter = IntelHostCounter(responder_counter_start, responder_counter_finish)
        else:
            logging.error("Unknown NIC vendor for RDMA responder.")
            responder_counter = None

        qp_info_list = get_qp_info_list(switch_msg_snapshot)
packet_list = get_packet_list(pcap_filename)
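The retry structure in run_traffic above (attempt an iteration, clean up, wait, and retry up to MAX_NB_EXP_RETRIES times before declaring the iteration failed) is a generic pattern. The following is only an illustrative sketch of that pattern, not code from the lumina repository; the run_with_retries helper and the run_one_iteration callable are hypothetical names, with the callable standing in for the run/fetch/merge/integrity-check sequence shown above.

import logging
import time

MAX_RETRIES = 3        # mirrors MAX_NB_EXP_RETRIES in the test above
RETRY_BACKOFF_SEC = 5  # mirrors the time.sleep(5) between attempts

def run_with_retries(run_one_iteration, iteration_id):
    """Run one experiment iteration, retrying on failure.

    run_one_iteration is a hypothetical callable that returns True on success.
    Returns True if any attempt succeeds, False after MAX_RETRIES failures.
    """
    for nb_retry in range(MAX_RETRIES):
        if run_one_iteration():
            return True
        logging.error("Iteration %d: attempt failed (retry: %d)", iteration_id, nb_retry)
        time.sleep(RETRY_BACKOFF_SEC)
    return False

if __name__ == "__main__":
    attempts = iter([False, False, True])  # fail twice, then succeed
    print("iteration succeeded:", run_with_retries(lambda: next(attempts), 0))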
5
2023-12-09 08:21:14+00:00
24k
ebb-earl-co/tidal-wave
tidal_wave/main.py
[ { "identifier": "login", "path": "tidal_wave/login.py", "snippet": "def login(\n audio_format: AudioFormat,\n) -> Tuple[Optional[requests.Session], Optional[AudioFormat]]:\n \"\"\"Given a selected audio_format, either log in \"automatically\"\n via the Fire TV OAuth 2.0 flow, or ask for an Android-/Windows-/MacOS-\n gleaned API token; the latter to be able to access HiRes fLaC audio.\n Returns a tuple of a requests.Session object, if no error, and the\n AudioFormat instance passed in; or (None, \"\") in the event of error.\n \"\"\"\n android_formats: Set[AudioFormat] = {\n AudioFormat.sony_360_reality_audio,\n AudioFormat.hi_res,\n }\n fire_tv_formats: Set[AudioFormat] = {\n AudioFormat.dolby_atmos,\n AudioFormat.mqa,\n AudioFormat.lossless,\n AudioFormat.high,\n AudioFormat.low,\n }\n if audio_format in fire_tv_formats:\n return (login_fire_tv(), audio_format)\n elif audio_format in android_formats:\n options: set = {\"android\", \"a\", \"windows\", \"w\"}\n _input: str = \"\"\n while _input not in options:\n _input = typer.prompt(\n \"For which of Android [a] or Windows [w] would you like to provide an API token?\"\n ).lower()\n else:\n if _input in {\"android\", \"a\"}:\n return (login_android(), audio_format)\n elif _input in {\"windows\", \"w\"}:\n return (login_windows(), audio_format)\n else:\n logger.critical(\n \"Please provide one of the following: \"\n f\"{', '.join(e.value for e in AudioFormat)}\"\n )\n return (None, \"\")" }, { "identifier": "AudioFormat", "path": "tidal_wave/login.py", "snippet": "class AudioFormat(str, Enum):\n sony_360_reality_audio = \"360\"\n dolby_atmos = \"Atmos\"\n hi_res = \"HiRes\"\n mqa = \"MQA\"\n lossless = \"Lossless\"\n high = \"High\"\n low = \"Low\"" }, { "identifier": "LogLevel", "path": "tidal_wave/login.py", "snippet": "class LogLevel(str, Enum):\n debug = \"DEBUG\" # 10\n info = \"INFO\" # 20\n warning = \"WARNING\" # 30\n error = \"ERROR\" # 40\n critical = \"CRITICAL\" # 50" }, { "identifier": "Album", "path": "tidal_wave/album.py", "snippet": "class Album:\n album_id: int\n\n def __post_init__(self):\n self.album_dir: Optional[Path] = None\n self.album_cover_saved: bool = False\n\n def get_items(self, session: Session):\n \"\"\"This method populates self.tracks by requesting from\n TIDAL albums/items endpoint.\"\"\"\n album_items: AlbumsItemsResponseJSON = request_album_items(\n session=session, identifier=self.album_id\n )\n _items = album_items.items if album_items is not None else ()\n self.tracks = tuple(_item.item for _item in _items)\n\n def get_metadata(self, session: Session):\n \"\"\"This method populates self.metadata by requesting from\n TIDAL /albums endpoint\"\"\"\n self.metadata: AlbumsEndpointResponseJSON = request_albums(\n session=session, identifier=self.album_id\n )\n\n def get_review(self, session: Session):\n \"\"\"This method requests the review corresponding to self.album_id\n in TIDAL. 
If it exists, it is written to disk as AlbumReview.json\n in self.album_dir\"\"\"\n self.album_review: Optional[AlbumsReviewResponseJSON] = request_album_review(\n session=session, identifier=self.album_id\n )\n if self.album_review is not None:\n (self.album_dir / \"AlbumReview.json\").write_text(\n self.album_review.to_json()\n )\n\n def set_dir(self, out_dir: Path):\n \"\"\"This method populates self.album_dir as a sub-subdirectory of\n out_dir: its parent directory is the name of the (main) artist of\n the album\"\"\"\n artist_substring: str = self.metadata.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.metadata.name.replace('..', '')} \"\n f\"[{self.metadata.id}] [{self.metadata.release_date.year}]\"\n )\n self.album_dir = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.metadata.number_of_volumes > 1:\n for v in range(1, self.metadata.number_of_volumes + 1):\n volume_substring: str = f\"Volume {v}\"\n (out_dir / artist_substring / album_substring / volume_substring).mkdir(\n parents=True, exist_ok=True\n )\n\n def save_cover_image(self, session: Session, out_dir: Path):\n \"\"\"This method writes cover.jpg in self.album_dir via the\n utils.download_cover_image() function. If successful,\n then self.album_cover_saved takes the value True\"\"\"\n if self.album_dir is None:\n self.set_dir(out_dir=out_dir)\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if not self.cover_path.exists():\n download_cover_image(\n session=session,\n cover_uuid=self.metadata.cover,\n output_dir=self.album_dir,\n )\n else:\n self.album_cover_saved = True\n\n def get_tracks(\n self, session: Session, audio_format: AudioFormat, out_dir: Path\n ) -> List[Optional[str]]:\n \"\"\"This method uses self.tracks to call track.Track.get() for each\n track in self.tracks. It uses the result of each of these calls to\n populate self.track_files\"\"\"\n track_files: List[str] = [None] * self.metadata.number_of_tracks\n for i, t in enumerate(self.tracks): # type(t) is TracksEndpointResponseJSON\n track: Track = Track(track_id=t.id)\n\n track_files_value: Optional[str] = track.get(\n session=session,\n audio_format=audio_format,\n out_dir=out_dir,\n metadata=t,\n album=self.metadata,\n )\n track_files[i] = {track.metadata.track_number: track_files_value}\n else:\n self.track_files = track_files\n\n def dumps(self):\n \"\"\"This method returns a JSON-like string of self.track_files\"\"\"\n return json.dumps(self.track_files)\n\n def dump(self, fp=sys.stdout):\n \"\"\"This method writes to (by default) STDOUT a\n JSON-like string of self.track_files\"\"\"\n json.dump(self.track_files, fp)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[AlbumsEndpointResponseJSON] = None,\n ):\n \"\"\"This method is the driver method of the class. It calls the\n other methods in order:\n 1. get_metadata()\n 2. get_items()\n 3. save_cover_image()\n 4. get_review()\n 5. 
get_tracks()\n \"\"\"\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n \n if self.metadata is None:\n self.track_files = {}\n return\n\n self.get_items(session)\n self.save_cover_image(session, out_dir)\n self.get_review(session)\n self.get_tracks(session, audio_format, out_dir)" }, { "identifier": "Artist", "path": "tidal_wave/artist.py", "snippet": "class Artist:\n artist_id: int\n\n def set_metadata(self, session: Session):\n \"\"\"This function requests from TIDAL API endpoint /artists and\n stores the results in self.metadata\"\"\"\n self.metadata: Optional[ArtistsEndpointResponseJSON] = request_artists(\n session, self.artist_id\n )\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes the bytes of self.metadata.picture to\n the file cover.jpg in self.artist_dir\"\"\"\n artist_image: Path = self.artist_dir / \"cover.jpg\"\n if not artist_image.exists():\n download_cover_image(\n session, self.metadata.picture, self.artist_dir, dimension=750\n )\n\n def set_albums(self, session: Session):\n \"\"\"This method requests from TIDAL API endpoint /artists/albums and\n stores the results in self.albums\"\"\"\n self.albums: Optional[ArtistsAlbumsResponseJSON] = request_artists_albums(\n session, self.artist_id\n )\n\n def set_audio_works(self, session: Session):\n \"\"\"This method requests from TIDAL API endpoint\n /artists/albums?filter=EPSANDSINGLES and stores the results in self.albums\"\"\"\n self.albums: Optional[ArtistsAlbumsResponseJSON] = request_artists_audio_works(\n session, self.artist_id\n )\n\n def set_videos(self, session: Session):\n \"\"\"This method requests from TIDAL API endpoint /artists/videos and\n stores the results in self.albums\"\"\"\n self.videos: Optional[ArtistsVideosResponseJSON] = request_artists_videos(\n session, self.artist_id\n )\n\n def set_dir(self, out_dir: Path):\n \"\"\"This method sets self.artist_dir and creates the directory on the file system\n if it does not exist\"\"\"\n self.name: str = self.metadata.name.replace(\"..\", \"\")\n self.artist_dir = out_dir / self.name\n self.artist_dir.mkdir(parents=True, exist_ok=True)\n\n def get_albums(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n include_eps_singles: bool = False,\n ) -> List[Optional[str]]:\n \"\"\"This method first fetches the total albums on TIDAL's service\n corresponding to the artist with ID self.artist_id. 
Then, each of\n the albums (and, optionally, EPs and singles) is requested and\n written to subdirectories of out_dir\"\"\"\n if include_eps_singles:\n self.set_audio_works(session)\n logger.info(\n f\"Starting attempt to get {self.albums.total_number_of_items} \"\n \"albums, EPs, and singles for artist with ID \"\n f\"{self.metadata.id}, '{self.name}'\"\n )\n else:\n self.set_albums(session)\n logger.info(\n f\"Starting attempt to get {self.albums.total_number_of_items} albums \"\n f\"for artist with ID {self.metadata.id}, '{self.name}'\"\n )\n\n for i, a in enumerate(self.albums.items):\n album: Album = Album(album_id=a.id)\n album.get(\n session=session,\n audio_format=audio_format,\n out_dir=out_dir,\n metadata=a,\n )\n\n def get_videos(\n self,\n session: Session,\n out_dir: Path,\n ) -> List[Optional[str]]:\n \"\"\"This method sets self.videos by calling self.set_videos()\n then, for each video, instantiates a Video object and executes\n video.get()\"\"\"\n self.set_videos(session)\n logger.info(\n f\"Starting attempt to get {self.videos.total_number_of_items} videos \"\n f\"for artist with ID {self.metadata.id}, '{self.name}'\"\n )\n for i, v in enumerate(self.videos.items):\n video: Video = Video(video_id=v.id)\n video.get(\n session=session,\n out_dir=out_dir,\n metadata=v,\n )\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n include_eps_singles: bool,\n ):\n \"\"\"This is the driver method of the class. It executes the other\n methods in order:\n 1. set_metadata\n 2. set_dir\n 3. save_artist_image\n 4. get_videos\n 5. get_albums\n \"\"\"\n self.set_metadata(session)\n \n if self.metadata is None:\n return\n \n self.set_dir(out_dir)\n self.save_artist_image(session)\n self.get_videos(session, out_dir)\n if include_eps_singles:\n self.get_albums(session, audio_format, out_dir, include_eps_singles=True)\n self.get_albums(session, audio_format, out_dir, include_eps_singles=False)" }, { "identifier": "Mix", "path": "tidal_wave/mix.py", "snippet": "class Mix:\n mix_id: str\n\n def __post_init__(self):\n self.mix_dir: Optional[Path] = None\n self.mix_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /playlists endpoint\"\"\"\n self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_mixes(\n session=session, mix_id=self.mix_id\n )\n \n if self.metadata is None:\n return\n \n self.name = (\n self.metadata.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n .replace(\"..\", \"\")\n )\n\n def set_items(self, session: Session):\n \"\"\"Uses data from TIDAL API /mixes/items endpoint to\n populate self.items\"\"\"\n mix_items: Optional[MixesItemsResponseJSON] = get_mix(\n session=session, mix_id=self.mix_id\n )\n if mix_items is None:\n self.items = tuple()\n else:\n self.items: Tuple[Optional[MixItem]] = tuple(mix_items.items)\n\n def set_dir(self, out_dir: Path):\n \"\"\"Populates self.mix_dir based on self.name, self.mix_id\"\"\"\n mix_substring: str = f\"{self.name} [{self.mix_id}]\"\n self.mix_dir: Path = out_dir / \"Mixes\" / mix_substring\n self.mix_dir.mkdir(parents=True, exist_ok=True)\n\n def save_cover_image(self, session: Session, out_dir: Path):\n \"\"\"Requests self.metadata.image and attempts to write it to disk\"\"\"\n if self.mix_dir is None:\n self.set_dir(out_dir=out_dir)\n self.cover_path: Path = self.mix_dir / \"cover.jpg\"\n if not self.cover_path.exists():\n with session.get(\n url=self.metadata.image, params={k: None 
for k in session.params}\n ) as r:\n (self.mix_dir / \"cover.jpg\").write_bytes(r.content)\n\n self.mix_cover_saved = True\n else:\n self.mix_cover_saved = True\n\n def get_items(self, session: Session, audio_format: AudioFormat):\n \"\"\"Using either Track.get() or Video.get(), attempt to request\n the data for each track or video in self.items\"\"\"\n if len(self.items) == 0:\n return\n tracks_videos: list = [None] * len(self.items)\n for i, item in enumerate(self.items):\n if item is None:\n tracks_videos[i] = None\n continue\n elif isinstance(item, TracksEndpointResponseJSON):\n track: Track = Track(track_id=item.id)\n track.get(\n session=session,\n audio_format=audio_format,\n out_dir=self.mix_dir,\n metadata=item,\n )\n tracks_videos[i] = track\n elif isinstance(item, VideosEndpointResponseJSON):\n video: Video = Video(video_id=item.id)\n video.get(\n session=session,\n out_dir=self.mix_dir,\n metadata=item,\n )\n tracks_videos[i] = video\n else:\n tracks_videos[i] = None\n continue\n else:\n self.tracks_videos: Tuple[\n Tuple[int, Optional[Union[Track, Video]]]\n ] = tuple(tracks_videos)\n return tracks_videos\n\n def flatten_mix_dir(self):\n \"\"\"When self.get_items() is called, the tracks and/or videos in\n self.items are downloaded using their self-contained .get() logic;\n this means that they will be downloaded to albums. This function\n \"flattens\" self.mix_dir, meaning that it moves all downloaded\n audio and video files to self.mix_dir, and removes the various\n subdirectories created\"\"\"\n files: List[Dict[int, Optional[str]]] = [None] * len(self.tracks_videos)\n if len(self.tracks_videos) == 0:\n return\n subdirs: Set[Path] = set()\n\n for i, tv in enumerate(self.tracks_videos, 1):\n if getattr(tv, \"outfile\") is None:\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n files[i - 1] = {i: None}\n continue\n\n _path: Optional[Path] = Path(tv.outfile) if tv is not None else None\n # if the item never got turned into a track or video\n if _path is None:\n files[i - 1] = {i: None}\n continue\n\n # if the track or video didn't download\n if _path.exists():\n if _path.stat().st_size == 0:\n files[i - 1] = {i: None}\n continue\n else:\n files[i - 1] = {i: None}\n continue\n\n # otherwise, move files and clean up\n if isinstance(tv, Track):\n new_path: Path = self.mix_dir / f\"{i:03d} - {tv.trackname}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n elif isinstance(tv, Video):\n new_path: Path = self.mix_dir / f\"{i:03d} - {_path.name}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n else:\n self.files: List[Dict[int, Optional[str]]] = files\n\n # Find all subdirectories written to\n subdirs: Set[Path] = set()\n for tv in self.tracks_videos:\n if isinstance(tv, Track):\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n elif isinstance(tv, Video):\n subdirs.add(tv.artist_dir)\n\n # Copy all artist images, artist bio JSON files out\n # of subdirs\n artist_images: Set[Path] = set()\n for subdir in subdirs:\n for p in subdir.glob(\"*.jpg\"):\n if p.name == \"cover.jpg\":\n continue\n artist_images.add(p)\n else:\n for artist_image_path in artist_images:\n if artist_image_path.exists():\n shutil.copyfile(\n artist_image_path.absolute(),\n self.mix_dir / artist_image_path.name,\n 
)\n\n artist_bios: Set[Path] = set()\n for subdir in subdirs:\n for p in subdir.glob(\"*bio.json\"):\n artist_bios.add(p)\n else:\n for artist_bio_path in artist_bios:\n if artist_bio_path.exists():\n shutil.copyfile(\n artist_bio_path.absolute(),\n self.mix_dir / artist_bio_path.name,\n )\n\n # Remove all subdirs\n for subdir in subdirs:\n if subdir.exists():\n shutil.rmtree(subdir)\n else:\n return self.mix_dir\n\n def dumps(self):\n return json.dumps(self.files)\n\n def dump(self, fp=sys.stdout):\n json.dump(self.files, fp)\n\n def get(self, session: Session, audio_format: AudioFormat, out_dir: Path):\n \"\"\"The main method of this class, executing a number of other methods\n in a row:\n - self.get_metadata()\n - self.set_items()\n - self.set_dir()\n - self.save_cover_image()\n - self.get_items()\n - self.flatten_playlist_dir()\n \"\"\"\n self.get_metadata(session)\n \n if self.metadata is None:\n self.files = {}\n return\n \n self.set_items(session)\n self.set_dir(out_dir)\n self.save_cover_image(session, out_dir)\n try:\n self.save_description()\n except Exception:\n pass\n\n _get_items = self.get_items(session, audio_format)\n if _get_items is None:\n logger.critical(f\"Could not retrieve mix with ID '{self.mix_id}'\")\n return\n self.flatten_mix_dir()\n logger.info(f\"Mix files written to '{self.mix_dir}'\")" }, { "identifier": "Playlist", "path": "tidal_wave/playlist.py", "snippet": "class Playlist:\n playlist_id: str # UUID4\n\n def __post_init__(self):\n self.playlist_dir: Optional[Path] = None\n self.playlist_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /playlists endpoint\"\"\"\n self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_playlists(\n session=session, identifier=self.playlist_id\n )\n \n if self.metadata is None:\n return\n \n self.name = (\n self.metadata.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n .replace(\"..\", \"\")\n )\n\n def set_items(self, session: Session):\n \"\"\"Uses data from TIDAL API /playlists/items endpoint to\n populate self.items\"\"\"\n playlist_items: Optional[PlaylistsItemsResponseJSON] = get_playlist(\n session=session, playlist_id=self.playlist_id\n )\n if playlist_items is None:\n self.items = tuple()\n else:\n self.items: Tuple[Optional[PlaylistItem]] = tuple(playlist_items.items)\n\n def set_dir(self, out_dir: Path):\n \"\"\"Populates self.playlist_dir based on self.name, self.playlist_id\"\"\"\n playlist_substring: str = f\"{self.name} [{self.playlist_id}]\"\n self.playlist_dir: Path = out_dir / \"Playlists\" / playlist_substring\n self.playlist_dir.mkdir(parents=True, exist_ok=True)\n\n def save_cover_image(self, session: Session, out_dir: Path):\n \"\"\"Requests self.metadata.image and attempts to write it to disk\"\"\"\n if self.playlist_dir is None:\n self.set_dir(out_dir=out_dir)\n self.cover_path: Path = self.playlist_dir / \"cover.jpg\"\n if not self.cover_path.exists():\n download_cover_image(\n session=session,\n cover_uuid=self.metadata.square_image,\n output_dir=self.playlist_dir,\n dimension=1080,\n )\n else:\n self.playlist_cover_saved = True\n\n def save_description(self):\n \"\"\"Requests self.metadata.description and attempts to write it to disk\"\"\"\n description_path: Path = self.playlist_dir / \"PlaylistDescription.txt\"\n if self.metadata.description is not None and len(self.metadata.description) > 0:\n if not description_path.exists():\n 
description_path.write_text(f\"{self.metadata.description}\\n\")\n\n def get_items(self, session: Session, audio_format: AudioFormat):\n \"\"\"Using either Track.get() or Video.get(), attempt to request\n the data for each track or video in self.items\"\"\"\n if len(self.items) == 0:\n return\n tracks_videos: list = [None] * len(self.items)\n for i, item in enumerate(self.items):\n if item is None:\n tracks_videos[i] = None\n continue\n elif isinstance(item, TracksEndpointResponseJSON):\n track: Track = Track(track_id=item.id)\n track.get(\n session=session,\n audio_format=audio_format,\n out_dir=self.playlist_dir,\n metadata=item,\n )\n tracks_videos[i] = track\n elif isinstance(item, VideosEndpointResponseJSON):\n video: Video = Video(video_id=item.id)\n video.get(\n session=session,\n out_dir=self.playlist_dir,\n metadata=item,\n )\n tracks_videos[i] = video\n else:\n tracks_videos[i] = None\n continue\n else:\n self.tracks_videos: Tuple[\n Tuple[int, Optional[Union[Track, Video]]]\n ] = tuple(tracks_videos)\n return tracks_videos\n\n def flatten_playlist_dir(self):\n \"\"\"When self.get_items() is called, the tracks and/or videos in\n self.items are downloaded using their self-contained .get() logic;\n this means that they will be downloaded to albums. This function\n \"flattens\" self.playlist_dir, meaning that it moves all downloaded\n audio and video files to self.playlist_dir, and removes the various\n subdirectories created\"\"\"\n files: List[Dict[int, Optional[str]]] = [None] * len(self.tracks_videos)\n if len(self.tracks_videos) == 0:\n return\n subdirs: Set[Path] = set()\n\n for i, tv in enumerate(self.tracks_videos, 1):\n if getattr(tv, \"outfile\") is None:\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n files[i - 1] = {i: None}\n continue\n\n _path: Optional[Path] = Path(tv.outfile) if tv is not None else None\n # if the item never got turned into a track or video\n if _path is None:\n files[i - 1] = {i: None}\n continue\n\n # if the track or video didn't download\n if _path.exists():\n if _path.stat().st_size == 0:\n files[i - 1] = {i: None}\n continue\n else:\n files[i - 1] = {i: None}\n continue\n\n # otherwise, move files and clean up\n if isinstance(tv, Track):\n new_path: Path = self.playlist_dir / f\"{i:03d} - {tv.trackname}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n elif isinstance(tv, Video):\n new_path: Path = self.playlist_dir / f\"{i:03d} - {_path.name}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n else:\n self.files: List[Dict[int, Optional[str]]] = files\n\n # Find all subdirectories written to\n subdirs: Set[Path] = set()\n for tv in self.tracks_videos:\n if isinstance(tv, Track):\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n elif isinstance(tv, Video):\n subdirs.add(tv.artist_dir)\n\n # Copy all artist images, artist bio JSON files out\n # of subdirs\n artist_images: Set[Path] = set()\n for subdir in subdirs:\n for p in subdir.glob(\"*.jpg\"):\n if p.name == \"cover.jpg\":\n continue\n artist_images.add(p)\n else:\n for artist_image_path in artist_images:\n if artist_image_path.exists():\n shutil.copyfile(\n artist_image_path.absolute(),\n self.playlist_dir / artist_image_path.name,\n )\n\n artist_bios: Set[Path] = set()\n for subdir in 
subdirs:\n for p in subdir.glob(\"*bio.json\"):\n artist_bios.add(p)\n else:\n for artist_bio_path in artist_bios:\n if artist_bio_path.exists():\n shutil.copyfile(\n artist_bio_path.absolute(),\n self.playlist_dir / artist_bio_path.name,\n )\n\n # Remove all subdirs\n for subdir in subdirs:\n if subdir.exists():\n shutil.rmtree(subdir)\n else:\n return self.playlist_dir\n\n def craft_m3u8_text(self):\n \"\"\"This method creates a file called playlist.m3u8 in self.playlist_dir\n that is a standard M3U. Needs to be called after self.flatten_playlist_dir\n in order to be able to access self.files\n N.b. the already-written file is temporarily copied to a .mp4 version in a\n temporary directory because .m4a files cannot be read with mutagen.\"\"\"\n m3u_text: str = f\"#EXTM3U\\n#EXTENC:UTF-8\\n#EXTIMG:{str(self.cover_path.absolute())}\\n#PLAYLIST:{self.name}\\n\"\n\n logger.info(\n f\"Creating .m3u8 playlist file for Playlist with ID '{self.playlist_id}'\"\n )\n for d in self.files:\n file: str = next(iter(d.values()))\n if file is None:\n continue\n elif file.endswith(\".flac\"):\n m = mutagen.File(file)\n artist: str = m.get(\"artist\", [\"\"])[0]\n title: str = m.get(\"title\", [\"\"])[0]\n extinf: str = (\n f\"#EXTINF:{math.ceil(m.info.length)},\"\n f\"{artist} - {title}\\n{file}\\n\"\n )\n m3u_text += extinf\n elif file.endswith(\".mka\"):\n m = mutagen.File(file)\n artist: str = m.get(\"ARTI\", [\"\"])[0]\n title: str = m.get(\"TITL\", [\"\"])[0]\n extinf: str = (\n f\"#EXTINF:{math.ceil(m.info.length)},\"\n f\"{artist} - {title}\\n{file}\\n\"\n )\n m3u_text += extinf\n elif file.endswith(\".m4a\"):\n # Mutagen cannot read .m4a files, so make a copy with all\n # of the metadata tags as a .mp4 in a temporary directory\n with temporary_file(suffix=\".mp4\") as tf:\n ffmpeg.input(file, hide_banner=None, y=None).output(\n tf.name,\n acodec=\"copy\",\n vcodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n\n m = mutagen.File(tf.name)\n artist: str = m.get(\"\\xa9ART\", [\"\"])[0]\n title: str = m.get(\"\\xa9nam\", [\"\"])[0]\n extinf: str = (\n f\"#EXTINF:{math.ceil(m.info.length)},\"\n f\"{artist} - {title}\\n{file}\\n\"\n )\n m3u_text += extinf\n else:\n return m3u_text\n\n def dumps(self):\n return json.dumps(self.files)\n\n def dump(self, fp=sys.stdout):\n json.dump(self.files, fp)\n\n def get(self, session: Session, audio_format: AudioFormat, out_dir: Path):\n \"\"\"The main method of this class, executing a number of other methods\n in a row:\n - self.get_metadata()\n - self.set_items()\n - self.set_dir()\n - self.save_cover_image()\n - self.save_description()\n - self.get_items()\n - self.flatten_playlist_dir()\n \"\"\"\n self.get_metadata(session)\n \n if self.metadata is None:\n self.files = {}\n return\n \n self.set_items(session)\n self.set_dir(out_dir)\n self.save_cover_image(session, out_dir)\n try:\n self.save_description()\n except Exception:\n pass\n\n _get_items = self.get_items(session, audio_format)\n if _get_items is None:\n logger.critical(f\"Could not retrieve playlist with ID '{self.playlist_id}'\")\n return\n\n self.flatten_playlist_dir()\n\n try:\n m3u8_text: str = self.craft_m3u8_text()\n except Exception as e:\n logger.warning(\n \"Unable to create playlist.m3u8 file for \"\n f\"playlist with ID '{self.playlist_id}'\"\n )\n logger.debug(e)\n else:\n with open(self.playlist_dir / \"playlist.m3u8\", \"w\") as f:\n f.write(m3u8_text)\n\n logger.info(f\"Playlist files written to '{self.playlist_dir}'\")" }, { "identifier": "Track", "path": "tidal_wave/track.py", "snippet": 
"class Track:\n track_id: int\n\n def __post_init__(self):\n self._has_lyrics: Optional[bool] = None\n self.tags: dict = {}\n self.album_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n self.metadata: Optional[TracksEndpointResponseJSON] = request_tracks(\n session, self.track_id\n )\n\n def get_album(self, session: Session):\n self.album: Optional[AlbumsEndpointResponseJSON] = request_albums(\n session, self.metadata.album.id\n )\n\n def get_credits(self, session: Session):\n self.credits: Optional[TracksCreditsResponseJSON] = request_credits(\n session, self.track_id\n )\n\n def get_lyrics(self, session: Session):\n if self._has_lyrics is None:\n self.lyrics: Optional[TracksLyricsResponseJSON] = request_lyrics(\n session, self.track_id\n )\n if self.lyrics is None:\n self._has_lyrics = False\n else:\n self._has_lyrics = True\n else:\n return self.lyrics\n\n def get_stream(self, session: Session, audio_format: AudioFormat):\n \"\"\"Populates self.stream, self.manifest\"\"\"\n aq: Optional[str] = af_aq.get(audio_format)\n self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream(\n session, self.track_id, aq\n )\n\n def set_manifest(self):\n \"\"\"This method sets self.manifest and self.codec\"\"\"\n self.manifest: Manifest = manifester(self.stream)\n # https://dashif.org/codecs/audio/\n if self.manifest.codecs == \"flac\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mqa\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mha1\": # Sony 360 Reality Audio\n self.codec = \"mka\"\n elif self.manifest.codecs == \"mp4a.40.5\": # HE-AAC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.29\": # HE-AAC v2\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.2\": # AAC-LC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"eac3\": # Enhanced AC-3\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.34\": # MP3\n self.codec = \"mp3\"\n\n def set_album_dir(self, out_dir: Path):\n \"\"\"This method sets self.album_dir, based on self.album and\n out_dir. In particular, self.album_dir is a subdirectory of out_dir\n based on the name of the album's artist\"\"\"\n artist_substring: str = self.album.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.album.name} \" f\"[{self.album.id}] [{self.album.release_date.year}]\"\n )\n self.album_dir: Path = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.album.number_of_volumes > 1:\n volume_substring: str = f\"Volume {self.metadata.volume_number}\"\n (self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, audio_format: AudioFormat):\n \"\"\"This method sets self.filename. It's based on self.metadata\n as well as audio_format. 
Additionally, if the available codecs in\n self.manifest don't match audio_format, warnings are logged\"\"\"\n _track_part: str = f\"{self.metadata.track_number:02d} - {self.metadata.name}\"\n if audio_format == AudioFormat.low:\n track_substring: str = f\"{_track_part} [L]\"\n elif audio_format == AudioFormat.high:\n track_substring: str = f\"{_track_part} [H]\"\n elif audio_format == AudioFormat.lossless:\n track_substring: str = f\"{_track_part} [CD]\"\n elif audio_format == AudioFormat.mqa:\n track_substring: str = f\"{_track_part} [Q]\"\n elif audio_format == AudioFormat.hi_res:\n track_substring: str = f\"{_track_part} [HiRes]\"\n elif audio_format == AudioFormat.dolby_atmos:\n track_substring: str = f\"{_track_part} [A]\"\n elif audio_format == AudioFormat.sony_360_reality_audio:\n track_substring: str = f\"{_track_part} [360]\"\n else:\n track_substring: str = _track_part\n\n # Check for MQA masquerading as HiRes here\n if audio_format == AudioFormat.hi_res:\n if self.manifest.codecs == \"mqa\":\n logger.warning(\n \"Even though HiRes audio format was requested, this track is only \"\n \"available in MQA format. TIDAL regards this as 'HiRes' even though \"\n \"it is probably only lossless; i.e. 16-bit 44.1 kHz quality. \"\n \"Downloading of track will continue, but it will be marked as MQA.\"\n )\n self.filename: Optional[str] = f\"{_track_part} [Q].{self.codec}\"\n elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100):\n logger.warning(\n \"Even though HiRes audio format was requested, and TIDAL responded to \"\n \"that request without error, this track is only available in lossless \"\n \"format; i.e. 16-bit 44.1 kHz quality. Downloading of track will \"\n \"continue, but it will be marked as Lossless ([CD]).\"\n )\n self.filename: Optional[str] = f\"{_track_part} [CD].{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n\n # for use in playlist file ordering\n self.trackname: str = re.match(r\"(?:\\d{2,3} - )(.+?$)\", self.filename).groups()[\n 0\n ]\n\n def set_outfile(self):\n \"\"\"Uses self.album_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n if self.album.number_of_volumes > 1:\n self.outfile: Path = (\n self.album_dir / f\"Volume {self.metadata.volume_number}\" / self.filename\n )\n self.absolute_outfile = str(self.outfile.absolute())\n else:\n self.outfile: Path = self.album_dir / self.filename\n self.absolute_outfile = str(self.outfile.absolute())\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Track {self.absolute_outfile} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes a JPEG file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_image: Path = (\n self.album_dir / f\"{a.name.replace('..', '')}.jpg\"\n )\n if not track_artist_image.exists():\n download_artist_image(session, a, self.album_dir)\n\n def save_artist_bio(self, session: Session):\n \"\"\"This method writes a JSON file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_bio_json: Path = self.album_dir / f\"{a.name}-bio.json\"\n if not 
track_artist_bio_json.exists():\n artist_bio: Optional[ArtistsBioResponseJSON] = request_artist_bio(\n session, a.id\n )\n if artist_bio is not None:\n logger.info(\n f\"Writing artist bio for artist {a.id} to \"\n f\"'{str(track_artist_bio_json.absolute())}\"\n )\n track_artist_bio_json.write_text(artist_bio.to_json())\n\n def save_album_cover(self, session: Session):\n \"\"\"This method saves cover.jpg to self.album_dir; the bytes for cover.jpg\n come from self.album.cover\"\"\"\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if (not self.cover_path.exists()) or (not self.album_cover_saved):\n download_cover_image(\n session=session, cover_uuid=self.album.cover, output_dir=self.album_dir\n )\n else:\n self.album_cover_saved = True\n\n def set_urls(self, session: Session):\n \"\"\"This method sets self.urls based on self.manifest\"\"\"\n if isinstance(self.manifest, JSONDASHManifest):\n self.urls: List[str] = self.manifest.urls\n elif isinstance(self.manifest, XMLDASHManifest):\n self.urls: List[str] = self.manifest.build_urls(session=session)\n self.download_headers: Dict[str, str] = {\"Accept\": self.manifest.mime_type}\n if session.session_id is not None:\n self.download_headers[\"sessionId\"] = session.session_id\n self.download_params = {k: None for k in session.params}\n\n def download_url(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method downloads self.urls[0], for use in situations when\n the manifest returned by TIDAL API contains one URL. It relies on\n byte range headers to incrementally get all content from a URL\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n # Implement HTTP range requests here to mimic official clients\n range_size: int = 1024 * 1024 # 1 MiB\n content_length: int = fetch_content_length(\n session=session, url=self.urls[0]\n )\n if content_length == 0:\n return\n\n range_headers: Iterable[str] = http_request_range_headers(\n content_length=content_length,\n range_size=range_size,\n return_tuple=False,\n )\n for rh in range_headers:\n with session.get(\n self.urls[0], params=self.download_params, headers={\"Range\": rh}\n ) as rr:\n if not rr.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(rr.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFMPEG to re-mux the audio bytes, otherwise\n # mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile,\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(\n f\"Track {self.track_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def download_urls(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method writes the contents from self.urls to a temporary\n directory, then uses FFmpeg to re-mux the data to self.outfile\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=self.download_headers, params=self.download_params\n ) as resp:\n if not resp.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(resp.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFmpeg to re-mux the audio bytes, otherwise\n # 
mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile, acodec=\"copy\", loglevel=\"quiet\"\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(f\"Track {self.track_id} written to '{self.absolute_outfile}'\")\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method GETs the data from self.urls and writes it\n to self.outfile.\"\"\"\n if len(self.urls) == 1:\n outfile: Optional[Path] = self.download_url(\n session=session, out_dir=out_dir\n )\n else:\n outfile: Optional[Path] = self.download_urls(\n session=session, out_dir=out_dir\n )\n\n return outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary,\n write the correct values of various metadata tags to the file.\n E.g. for .flac files, the album's artist is 'ALBUMARTIST',\n but for .m4a files, the album's artist is 'aART'.\"\"\"\n tags = dict()\n if (self.codec == \"flac\") or (self.codec == \"mka\"):\n tag_map = {k: v[\"flac\"] for k, v in TAG_MAPPING.items()}\n elif self.codec == \"m4a\":\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"album\"]] = self.album.title\n tags[tag_map[\"album_artist\"]] = \";\".join((a.name for a in self.album.artists))\n tags[tag_map[\"album_peak_amplitude\"]] = f\"{self.stream.album_peak_amplitude}\"\n tags[tag_map[\"album_replay_gain\"]] = f\"{self.stream.album_replay_gain}\"\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"barcode\"]] = self.album.upc\n tags[tag_map[\"comment\"]] = self.metadata.url\n tags[tag_map[\"copyright\"]] = self.metadata.copyright\n tags[tag_map[\"date\"]] = str(self.album.release_date)\n tags[tag_map[\"isrc\"]] = self.metadata.isrc\n tags[tag_map[\"title\"]] = self.metadata.name\n tags[tag_map[\"track_peak_amplitude\"]] = f\"{self.metadata.peak}\"\n tags[tag_map[\"track_replay_gain\"]] = f\"{self.metadata.replay_gain}\"\n # credits\n for tag in {\"composer\", \"engineer\", \"lyricist\", \"mixer\", \"producer\", \"remixer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.credits, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n # lyrics\n try:\n _lyrics = self.lyrics.subtitles\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[tag_map[\"lyrics\"]] = _lyrics\n\n if self.codec == \"flac\":\n # track and disk\n tags[\"DISCTOTAL\"] = f\"{self.album.number_of_volumes}\"\n tags[\"DISC\"] = f\"{self.metadata.volume_number}\"\n tags[\"TRACKTOTAL\"] = f\"{self.album.number_of_tracks}\"\n tags[\"TRACKNUMBER\"] = f\"{self.metadata.track_number}\"\n # instrument-specific\n # piano\n try:\n piano_credits: List[str] = [\n f\"{pc} (piano)\" for pc in self.credits.piano\n ]\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[\"PERFORMER\"] = piano_credits\n\n elif self.codec == \"m4a\":\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n tags[\"trkn\"] = [(self.metadata.track_number, self.album.number_of_tracks)]\n 
tags[\"disk\"] = [(self.metadata.volume_number, self.album.number_of_volumes)]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n # add album cover\n if self.codec == \"flac\":\n p = mutagen.flac.Picture()\n p.type = mutagen.id3.PictureType.COVER_FRONT\n p.desc = \"Album Cover\"\n p.width = p.height = 1280\n p.mime = \"image/jpeg\"\n p.data = self.cover_path.read_bytes()\n self.mutagen.add_picture(p)\n elif self.codec == \"m4a\":\n self.mutagen[\"covr\"] = [\n MP4Cover(self.cover_path.read_bytes(), imageformat=MP4Cover.FORMAT_JPEG)\n ]\n\n self.mutagen.save()\n # Make sure audio track comes first because of\n # less-sophisticated audio players that only\n # recognize the first stream\n if self.codec == \"flac\":\n with temporary_file(suffix=\".mka\") as tf:\n shutil.move(str(self.outfile.absolute()), tf.name)\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{tf.name}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy\n -metadata:s:v title='Album cover' -metadata:s:v comment='Cover (front)'\n -disposition:v attached_pic \"{self.absolute_outfile}\" \"\"\"\n )\n subprocess.run(cmd)\n elif self.codec == \"m4a\":\n with temporary_file(suffix=\".mka\") as tf:\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{self.absolute_outfile}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy \"{tf.name}\" \"\"\"\n )\n subprocess.run(cmd)\n shutil.copyfile(tf.name, self.absolute_outfile)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[TracksEndpointResponseJSON] = None,\n album: Optional[AlbumsEndpointResponseJSON] = None,\n ) -> Optional[str]:\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n self.outfile = None\n return\n\n if \"DOLBY_ATMOS\" in self.metadata.media_metadata.tags:\n if audio_format != AudioFormat.dolby_atmos:\n logger.warning(\n f\"Track {self.track_id} is only available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if audio_format == AudioFormat.dolby_atmos:\n if \"DOLBY_ATMOS\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Dolby Atmos audio format was requested, but track \"\n f\"{self.track_id} is not available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.sony_360_reality_audio:\n if \"SONY_360RA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Sony 360 Reality Audio audio format was requested, but track \"\n f\"{self.track_id} is not available in Sony 360 Reality Audio \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.mqa:\n if \"MQA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"MQA audio format was requested, but track \"\n f\"{self.track_id} is not available in MQA audio \"\n \"format. 
Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if album is None:\n self.get_album(session)\n else:\n self.album = album\n\n if self.album is None:\n self.outfile = None\n return\n\n self.get_credits(session)\n self.get_stream(session, audio_format)\n if self.stream is None:\n return\n self.set_manifest()\n self.set_album_dir(out_dir)\n self.set_filename(audio_format)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return\n\n try:\n self.get_lyrics(session)\n except Exception:\n pass\n\n self.save_album_cover(session)\n\n try:\n self.save_artist_image(session)\n except Exception:\n pass\n\n try:\n self.save_artist_bio(session)\n except Exception:\n pass\n\n self.set_urls(session)\n\n if self.download(session, out_dir) is None:\n return\n\n self.craft_tags()\n self.set_tags()\n\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dump({k: v}, fp)\n return None\n\n def dumps(self) -> str:\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dumps({k: v})\n return None" }, { "identifier": "Video", "path": "tidal_wave/video.py", "snippet": "class Video:\n video_id: int\n\n def __post_init__(self):\n self.tags: dict = {}\n self.codec: str = \"mp4\"\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /videos endpoint\"\"\"\n self.metadata: Optional[VideosEndpointResponseJSON] = request_videos(\n session, self.video_id\n )\n\n def get_contributors(self, session: Session):\n \"\"\"Request from TIDAL API /videos/contributors endpoint\"\"\"\n self.contributors: Optional[\n VideosContributorsResponseJSON\n ] = request_video_contributors(session, self.video_id)\n\n def get_stream(self, session: Session, video_format=VideoFormat.high):\n \"\"\"Populates self.stream by requesting from TIDAL API\n /videos/playbackinfopostpaywall endpoint\"\"\"\n self.stream: Optional[VideosEndpointStreamResponseJSON] = request_video_stream(\n session, self.video_id, video_format.value\n )\n\n def get_m3u8(self, session: Session):\n \"\"\"This method sets self.m3u8, an m3u8.M3U8 object\n following the HTTP Live Streaming specification; parsed from\n self.stream. I.e., self.get_stream() needs to have been executed\n before calling this method. N.b. 
self.m3u8 almost certainly will\n be a multivariant playlist, meaning further processing of its\n contents will be necessary.\"\"\"\n self.m3u8: m3u8.Playlist = playlister(session=session, vesrj=self.stream)\n\n def set_urls(self):\n \"\"\"This method uses self.m3u8, an m3u8.M3U8 object that is variant:\n (https://developer.apple.com/documentation/http-live-streaming/creating-a-multivariant-playlist)\n It retrieves the highest-quality .m3u8 in its .playlists attribute,\n and sets self.urls as the list of strings from that m3u8.Playlist\"\"\"\n # for now, just get the highest-bandwidth playlist\n playlist: m3u8.Playlist = variant_streams(self.m3u8)\n self.M3U8 = m3u8.load(playlist.uri)\n if self.M3U8 is None or len(self.M3U8.files) == 0:\n raise TidalM3U8Exception(\n f\"HLS media segments are not available for video {self.video_id}\"\n )\n self.urls: List[str] = self.M3U8.files\n\n def set_artist_dir(self, out_dir: Path):\n \"\"\"Set self.artist_dir, which is the subdirectory of `out_dir`\n with name `self.metadata.artist.name`\"\"\"\n self.artist_dir: Path = out_dir / self.metadata.artist.name\n self.artist_dir.mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, out_dir: Path):\n \"\"\"Set self.filename, which is constructed from self.metadata.name\n and self.stream.video_quality\"\"\"\n self.filename: str = (\n f\"{self.metadata.name} [{self.stream.video_quality}].{self.codec}\"\n )\n\n def set_outfile(self):\n \"\"\"Uses self.artist_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n self.outfile: Path = self.artist_dir / self.filename\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Video {str(self.outfile.absolute())} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"Requests the HLS video files that constitute self.video_id.\n Writes HLS bytes to a temporary file, then uses FFmpeg to write the\n video data to self.outfile\"\"\"\n if session.session_id is not None:\n download_headers: Dict[str, str] = {\"sessionId\": session.session_id}\n else:\n download_headers: dict = dict()\n download_params: Dict[str, None] = {k: None for k in session.params}\n # self.outfile should already have been set by self.set_outfile()\n logger.info(\n f\"Writing video {self.video_id} to '{str(self.outfile.absolute())}'\"\n )\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=download_headers, params=download_params\n ) as download_response:\n if not download_response.ok:\n logger.warning(f\"Could not download {self}\")\n else:\n ntf.write(download_response.content)\n else:\n ntf.seek(0)\n\n # will always be .mp4 because HLS\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n str(self.outfile.absolute()),\n vcodec=\"copy\",\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n\n logger.info(\n f\"Video {self.video_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary, write the correct values of\n various metadata tags to the file. 
Videos are .mp4\"\"\"\n tags = dict()\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"comment\"]] = f\"https://tidal.com/browse/video/{self.video_id}\"\n tags[tag_map[\"date\"]] = str(self.metadata.release_date.date())\n tags[tag_map[\"title\"]] = self.metadata.title\n\n for tag in {\"composer\", \"director\", \"lyricist\", \"producer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.contributors, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n self.mutagen.save()\n\n def get(\n self,\n session: Session,\n out_dir: Path,\n metadata: Optional[\"VideosEndpointResponseJSON\"] = None,\n ) -> Optional[str]:\n \"\"\"The main method of this class. Executes a number of other methods\n in a row:\n - self.get_metadata()\n - self.get_contributors()\n - self.get_stream()\n - self.get_m3u8()\n - self.set_urls()\n - self.set_artist_dir()\n - self.set_filename()\n - self.set_outfile()\n - self.download()\n - self.craft_tags()\n - self.set_tags()\n \"\"\"\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n return None\n\n self.get_contributors(session)\n self.get_stream(session)\n if self.stream is None:\n return None\n self.get_m3u8(session)\n self.set_urls()\n self.set_artist_dir(out_dir)\n self.set_filename(out_dir)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return None\n\n if self.download(session, out_dir) is None:\n return None\n\n self.craft_tags()\n self.set_tags()\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n json.dump({self.metadata.title: str(self.outfile.absolute())}, fp)\n\n def dumps(self) -> str:\n return json.dumps({self.metadata.title: str(self.outfile.absolute())})" }, { "identifier": "match_tidal_url", "path": "tidal_wave/models.py", "snippet": "def match_tidal_url(input_str: str) -> Optional[TidalResource]:\n \"\"\"Attempt to match the `input_str` to either the URL of a track or an\n album in the Tidal API service. Returns None if `input_str` matches\n neither, otherwise a subclass of TidalResource corresponding to the\n parsed input_str type\n \"\"\"\n resource_match: Optional[TidalResource] = None\n tidal_resources: Tuple[TidalResource] = (\n TidalTrack,\n TidalAlbum,\n TidalVideo,\n TidalPlaylist,\n TidalMix,\n TidalArtist,\n )\n for T in tidal_resources:\n try:\n resource_match = T(input_str)\n except ValueError as v:\n logger.debug(v)\n continue\n else:\n return resource_match" }, { "identifier": "TidalAlbum", "path": "tidal_wave/models.py", "snippet": "class TidalAlbum(TidalResource):\n \"\"\"Class representing a TIDAL album. 
Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?album/(\\d{5,9})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL album URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL album ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalArtist", "path": "tidal_wave/models.py", "snippet": "class TidalArtist(TidalResource):\n \"\"\"Class representing a TIDAL artist. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?artist/(\\d{7,9})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL album URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL album ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalMix", "path": "tidal_wave/models.py", "snippet": "class TidalMix(TidalResource):\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?mix/(\\w{30})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL mix URL\")\n else:\n self.tidal_id = _id\n logger.info(f\"TIDAL mix ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalPlaylist", "path": "tidal_wave/models.py", "snippet": "class TidalPlaylist(TidalResource):\n \"\"\"Class representing a TIDAL playlist. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?playlist/\"\n r\"([0-9a-f]{8}\\-[0-9a-f]{4}\\-4[0-9a-f]{3}\\-[89ab][0-9a-f]{3}\\-[0-9a-f]{12})(?:.*?)?\"\n )\n\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL playlist URL\")\n else:\n self.tidal_id = _id\n logger.info(f\"TIDAL playlist ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalTrack", "path": "tidal_wave/models.py", "snippet": "class TidalTrack(TidalResource):\n \"\"\"Class representing a TIDAL track. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?(?:album/\\d{5,9}/)?track/(\\d{5,9})(?:.*?)?\"\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL track URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL track ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalVideo", "path": "tidal_wave/models.py", "snippet": "class TidalVideo(TidalResource):\n \"\"\"Class representing a TIDAL video. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?video/(\\d{7,9})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL video URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL video ID parsed from input: {self.tidal_id}\")" } ]
from contextlib import closing from pathlib import Path from typing import Optional, Union from .login import login, AudioFormat, LogLevel from .album import Album from .artist import Artist from .mix import Mix from .playlist import Playlist from .track import Track from .video import Video from .models import ( match_tidal_url, TidalAlbum, TidalArtist, TidalMix, TidalPlaylist, TidalTrack, TidalVideo, ) from platformdirs import user_music_path from typing_extensions import Annotated import logging import typer
17473
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[ Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo] ] = match_tidal_url(tidal_url) if tidal_resource is None: logger.critical( f"Cannot parse '{tidal_url}' as a TIDAL album, artist, mix, playlist, track, or video URL" ) raise typer.Exit(code=1) s, audio_format = login(audio_format=audio_format) if s is None: raise typer.Exit(code=1) with closing(s) as session: if isinstance(tidal_resource, TidalTrack): track = Track(track_id=tidal_resource.tidal_id) track.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: track.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalAlbum): album = Album(album_id=tidal_resource.tidal_id) album.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: album.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalArtist): artist = Artist(artist_id=tidal_resource.tidal_id) artist.get( session=session, audio_format=audio_format, out_dir=output_directory, include_eps_singles=include_eps_singles, ) raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalVideo): video = Video(video_id=tidal_resource.tidal_id) video.get(session=session, out_dir=output_directory) if loglevel == LogLevel.debug: video.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalPlaylist):
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[ Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo] ] = match_tidal_url(tidal_url) if tidal_resource is None: logger.critical( f"Cannot parse '{tidal_url}' as a TIDAL album, artist, mix, playlist, track, or video URL" ) raise typer.Exit(code=1) s, audio_format = login(audio_format=audio_format) if s is None: raise typer.Exit(code=1) with closing(s) as session: if isinstance(tidal_resource, TidalTrack): track = Track(track_id=tidal_resource.tidal_id) track.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: track.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalAlbum): album = Album(album_id=tidal_resource.tidal_id) album.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: album.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalArtist): artist = Artist(artist_id=tidal_resource.tidal_id) artist.get( session=session, audio_format=audio_format, out_dir=output_directory, include_eps_singles=include_eps_singles, ) raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalVideo): video = Video(video_id=tidal_resource.tidal_id) video.get(session=session, out_dir=output_directory) if loglevel == LogLevel.debug: video.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalPlaylist):
playlist = Playlist(playlist_id=tidal_resource.tidal_id)
6
2023-12-12 21:50:25+00:00
24k
lbcb-sci/GNNome
pipeline.py
[ { "identifier": "train", "path": "train.py", "snippet": "def train(train_path, valid_path, out, assembler, overfit=False, dropout=None, seed=None, resume=False):\n hyperparameters = get_hyperparameters()\n if seed is None:\n seed = hyperparameters['seed']\n num_epochs = hyperparameters['num_epochs']\n num_gnn_layers = hyperparameters['num_gnn_layers']\n hidden_features = hyperparameters['dim_latent']\n nb_pos_enc = hyperparameters['nb_pos_enc']\n patience = hyperparameters['patience']\n lr = hyperparameters['lr']\n device = hyperparameters['device']\n batch_norm = hyperparameters['batch_norm']\n node_features = hyperparameters['node_features']\n edge_features = hyperparameters['edge_features']\n hidden_edge_features = hyperparameters['hidden_edge_features']\n hidden_edge_scores = hyperparameters['hidden_edge_scores']\n decay = hyperparameters['decay']\n wandb_mode = hyperparameters['wandb_mode']\n wandb_project = hyperparameters['wandb_project']\n num_nodes_per_cluster = hyperparameters['num_nodes_per_cluster']\n npc_lower_bound = hyperparameters['npc_lower_bound']\n npc_upper_bound = hyperparameters['npc_upper_bound']\n k_extra_hops = hyperparameters['k_extra_hops']\n masking = hyperparameters['masking']\n mask_frac_low = hyperparameters['mask_frac_low']\n mask_frac_high = hyperparameters['mask_frac_high']\n use_symmetry_loss = hyperparameters['use_symmetry_loss']\n alpha = hyperparameters['alpha'] \n\n config = get_config()\n checkpoints_path = os.path.abspath(config['checkpoints_path'])\n models_path = os.path.abspath(config['models_path'])\n\n print(f'----- TRAIN -----')\n print(f'\\nSaving checkpoints: {checkpoints_path}')\n print(f'Saving models: {models_path}\\n')\n \n print(f'USING SEED: {seed}')\n\n if torch.cuda.is_available():\n torch.cuda.set_device(device)\n utils.set_seed(seed)\n\n time_start = datetime.now()\n timestamp = time_start.strftime('%Y-%b-%d-%H-%M-%S')\n \n if out is None:\n out = timestamp\n assert train_path is not None, \"train_path not specified!\"\n assert valid_path is not None, \"valid_path not specified!\"\n\n if not overfit:\n ds_train = AssemblyGraphDataset(train_path, assembler=assembler)\n ds_valid = AssemblyGraphDataset(valid_path, assembler=assembler)\n else:\n ds_train = ds_valid = AssemblyGraphDataset(train_path, assembler=assembler)\n\n pos_to_neg_ratio = sum([((g.edata['y']==1).sum() / (g.edata['y']==0).sum()).item() for idx, g in ds_train]) / len(ds_train)\n\n model = models.SymGatedGCNModel(node_features, edge_features, hidden_features, hidden_edge_features, num_gnn_layers, hidden_edge_scores, batch_norm, nb_pos_enc, dropout=dropout)\n model.to(device)\n if not os.path.exists(models_path):\n print(models_path)\n os.makedirs(models_path)\n\n out = out + f'_seed{seed}'\n\n model_path = os.path.join(models_path, f'model_{out}.pt') # TODO: Delete this?\n model_min_loss_path = os.path.join(models_path, f'model_min-loss_{out}.pt')\n \n print(f'MODEL PATH: {model_path}')\n \n ckpt_path = f'{checkpoints_path}/ckpt_{out}.pt'\n print(f'CHECKPOINT PATH: {ckpt_path}')\n\n print(f'\\nNumber of network parameters: {view_model_param(model)}\\n')\n print(f'Normalization type : Batch Normalization\\n') if batch_norm else print(f'Normalization type : Layer Normalization\\n')\n\n pos_weight = torch.tensor([1 / pos_to_neg_ratio], device=device)\n criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=decay, patience=patience, 
verbose=True)\n start_epoch = 0\n\n loss_per_epoch_train, loss_per_epoch_valid = [], []\n f1_inv_per_epoch_valid = []\n\n if not os.path.exists(checkpoints_path):\n os.makedirs(checkpoints_path)\n \n if resume:\n # ckpt_path = f'{checkpoints_path}/ckpt_{out}.pt' # This should be the checkpoint of the old run\n checkpoint = torch.load(ckpt_path)\n print('Loding the checkpoint from:', ckpt_path, sep='\\t')\n model_path = os.path.join(models_path, f'model_{out}_resumed-{num_epochs}.pt')\n ckpt_path = os.path.join(checkpoints_path, f'ckpt_{out}_resumed-{num_epochs}.pt')\n print('Saving the resumed model to:', model_path, sep='\\t')\n print('Saving the new checkpoint to:', ckpt_path, sep='\\t')\n \n start_epoch = checkpoint['epoch'] + 1\n print(f'Resuming from epoch: {start_epoch}')\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optim_state_dict'])\n \n min_loss_train = checkpoint['loss_train']\n min_loss_valid = checkpoint['loss_valid']\n loss_per_epoch_train.append(min_loss_train)\n loss_per_epoch_valid.append(min_loss_valid)\n\n elapsed = utils.timedelta_to_str(datetime.now() - time_start)\n print(f'Loading data done. Elapsed time: {elapsed}')\n\n try:\n with wandb.init(project=wandb_project, config=hyperparameters, mode=wandb_mode, name=out):\n wandb.watch(model, criterion, log='all', log_freq=1000)\n\n for epoch in range(start_epoch, num_epochs):\n\n train_loss_all_graphs, train_fp_rate_all_graphs, train_fn_rate_all_graphs = [], [], []\n train_acc_all_graphs, train_precision_all_graphs, train_recall_all_graphs, train_f1_all_graphs = [], [], [], []\n \n train_loss_epoch, train_fp_rate_epoch, train_fn_rate_epoch = [], [], []\n train_acc_epoch, train_precision_epoch, train_recall_epoch, train_f1_epoch = [], [], [], []\n train_acc_inv_epoch, train_precision_inv_epoch, train_recall_inv_epoch, train_f1_inv_epoch = [], [], [], []\n train_aps_epoch, train_aps_inv_epoch = [], []\n\n print('\\n===> TRAINING\\n')\n random.shuffle(ds_train.graph_list)\n for data in ds_train:\n model.train()\n idx, g = data\n \n print(f'\\n(TRAIN: Epoch = {epoch:3}) NEW GRAPH: index = {idx}')\n\n if masking:\n fraction = random.randint(mask_frac_low, mask_frac_high) / 100 # Fraction of nodes to be left in the graph (.85 -> ~30x, 1.0 -> 60x)\n g = mask_graph_strandwise(g, fraction, device)\n\n # Number of clusters dependant on graph size!\n num_nodes_per_cluster_min = int(num_nodes_per_cluster * npc_lower_bound)\n num_nodes_per_cluster_max = int(num_nodes_per_cluster * npc_upper_bound) + 1\n num_nodes_for_g = torch.LongTensor(1).random_(num_nodes_per_cluster_min, num_nodes_per_cluster_max).item()\n num_clusters = g.num_nodes() // num_nodes_for_g + 1\n\n if num_nodes_for_g >= g.num_nodes(): # train with full graph\n print(f'\\nUse METIS: False')\n print(f'Use full graph')\n g = g.to(device)\n\n if use_symmetry_loss:\n x = g.ndata['x'].to(device)\n e = g.edata['e'].to(device)\n # pe = g.ndata['pe'].to(device)\n # pe = (pe - pe.mean()) / pe.std()\n pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n # pe = torch.cat((pe_in, pe_out, pe), dim=1)\n pe = torch.cat((pe_in, pe_out), dim=1)\n org_scores = model(g, x, e, pe).squeeze(-1)\n edge_predictions = org_scores\n edge_labels = g.edata['y'].to(device)\n \n g = dgl.reverse(g, True, True)\n x = g.ndata['x'].to(device)\n e = g.edata['e'].to(device)\n # pe = 
g.ndata['pe'].to(device)\n # pe = (pe - pe.mean()) / pe.std()\n pe_out = g.ndata['in_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe_in = g.ndata['out_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n # pe = torch.cat((pe_in, pe_out, pe), dim=1)\n pe = torch.cat((pe_in, pe_out), dim=1)\n rev_scores = model(g, x, e, pe).squeeze(-1)\n loss = symmetry_loss(org_scores, rev_scores, edge_labels, pos_weight, alpha=alpha)\n else:\n x = g.ndata['x'].to(device)\n e = g.edata['e'].to(device)\n # pe = g.ndata['pe'].to(device)\n # pe = (pe - pe.mean()) / pe.std()\n pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n # pe = torch.cat((pe_in, pe_out, pe), dim=1)\n pe = torch.cat((pe_in, pe_out), dim=1)\n edge_predictions = model(g, x, e, pe)\n edge_predictions = edge_predictions.squeeze(-1)\n edge_labels = g.edata['y'].to(device)\n loss = criterion(edge_predictions, edge_labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_loss = loss.item()\n TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)\n acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)\n try:\n fp_rate = FP / (FP + TN)\n except ZeroDivisionError:\n fp_rate = 0.0\n try:\n fn_rate = FN / (FN + TP)\n except ZeroDivisionError:\n fn_rate = 0.0\n train_fp_rate = fp_rate\n train_fn_rate = fn_rate\n train_acc = acc\n train_precision = precision\n train_recall = recall\n train_f1 = f1\n \n train_loss_epoch.append(loss.item())\n train_fp_rate_epoch.append(fp_rate)\n train_fn_rate_epoch.append(fn_rate)\n\n # elapsed = utils.timedelta_to_str(datetime.now() - time_start)\n # print(f'\\nTRAINING (one training graph): Epoch = {epoch}, Graph = {idx}')\n # print(f'Loss: {train_loss:.4f}, fp_rate(GT=0): {train_fp_rate:.4f}, fn_rate(GT=1): {train_fn_rate:.4f}')\n # print(f'elapsed time: {elapsed}\\n\\n')\n\n else: # train with mini-batch\n print(f'\\nUse METIS: True')\n print(f'Number of clusters:', num_clusters)\n g = g.long()\n d = dgl.metis_partition(g, num_clusters, extra_cached_hops=k_extra_hops)\n sub_gs = list(d.values())\n random.shuffle(sub_gs)\n \n # Loop over all mini-batch in the graph\n running_loss, running_fp_rate, running_fn_rate = [], [], []\n running_acc, running_precision, running_recall, running_f1 = [], [], [], []\n\n for sub_g in sub_gs:\n \n if use_symmetry_loss:\n sub_g = sub_g.to(device)\n x = g.ndata['x'][sub_g.ndata['_ID']].to(device)\n e = g.edata['e'][sub_g.edata['_ID']].to(device)\n pe_in = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe = torch.cat((pe_in, pe_out), dim=1)\n org_scores = model(sub_g, x, e, pe).squeeze(-1)\n labels = g.edata['y'][sub_g.edata['_ID']].to(device)\n \n sub_g = dgl.reverse(sub_g, True, True)\n x = g.ndata['x'][sub_g.ndata['_ID']].to(device)\n e = g.edata['e'][sub_g.edata['_ID']].to(device)\n pe_out = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe_in = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device) # Reversed edges, in/out-deg also 
reversed\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe = torch.cat((pe_in, pe_out), dim=1)\n rev_scores = model(sub_g, x, e, pe).squeeze(-1)\n \n loss = symmetry_loss(org_scores, rev_scores, labels, pos_weight, alpha=alpha)\n edge_predictions = org_scores\n edge_labels = labels\n\n else:\n sub_g = sub_g.to(device)\n x = g.ndata['x'][sub_g.ndata['_ID']].to(device)\n e = g.edata['e'][sub_g.edata['_ID']].to(device)\n pe_in = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe = torch.cat((pe_in, pe_out), dim=1)\n edge_predictions = model(sub_g, x, e, pe) \n edge_predictions = edge_predictions.squeeze(-1)\n\n edge_labels = g.edata['y'][sub_g.edata['_ID']].to(device)\n loss = criterion(edge_predictions, edge_labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)\n acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)\n acc_inv, precision_inv, recall_inv, f1_inv = utils.calculate_metrics_inverse(TP, TN, FP, FN)\n \n try:\n fp_rate = FP / (FP + TN)\n except ZeroDivisionError:\n fp_rate = 0.0\n try:\n fn_rate = FN / (FN + TP)\n except ZeroDivisionError:\n fn_rate = 0.0\n \n # Append results of a single mini-batch / METIS partition\n # These are used for epoch mean = mean over partitions over graphs - mostly DEPRECATED\n running_loss.append(loss.item())\n running_fp_rate.append(fp_rate)\n running_fn_rate.append(fn_rate)\n running_acc.append(acc)\n running_precision.append(precision)\n running_recall.append(recall)\n running_f1.append(f1)\n \n # These are used for epoch mean = mean over all the partitions in all the graphs\n train_loss_epoch.append(loss.item())\n train_fp_rate_epoch.append(fp_rate)\n train_fn_rate_epoch.append(fn_rate)\n train_acc_epoch.append(acc)\n train_precision_epoch.append(precision)\n train_recall_epoch.append(recall)\n train_f1_epoch.append(f1)\n \n # Inverse metrics because F1 and them are not good for dataset with mostly positive labels\n train_acc_inv_epoch.append(acc_inv)\n train_precision_inv_epoch.append(precision_inv)\n train_recall_inv_epoch.append(recall_inv)\n train_f1_inv_epoch.append(f1_inv)\n\n # Average over all mini-batches (partitions) in a single graph - mostly DEPRECATED\n train_loss = np.mean(running_loss)\n train_fp_rate = np.mean(running_fp_rate)\n train_fn_rate = np.mean(running_fn_rate)\n train_acc = np.mean(running_acc)\n train_precision = np.mean(running_precision)\n train_recall = np.mean(running_recall)\n train_f1 = np.mean(running_f1)\n\n # elapsed = utils.timedelta_to_str(datetime.now() - time_start)\n # print(f'\\nTRAINING (one training graph): Epoch = {epoch}, Graph = {idx}')\n # print(f'Loss: {train_loss:.4f}, fp_rate(GT=0): {train_fp_rate:.4f}, fn_rate(GT=1): {train_fn_rate:.4f}')\n # print(f'elapsed time: {elapsed}\\n\\n')\n\n # Record after each graph in the dataset - mostly DEPRECATED\n train_loss_all_graphs.append(train_loss)\n train_fp_rate_all_graphs.append(train_fp_rate)\n train_fn_rate_all_graphs.append(train_fn_rate)\n train_acc_all_graphs.append(train_acc)\n train_precision_all_graphs.append(train_precision)\n train_recall_all_graphs.append(train_recall)\n train_f1_all_graphs.append(train_f1)\n\n # Average over all the training graphs in one epoch - mostly DEPRECATED\n train_loss_all_graphs = np.mean(train_loss_all_graphs)\n 
train_fp_rate_all_graphs = np.mean(train_fp_rate_all_graphs)\n train_fn_rate_all_graphs = np.mean(train_fn_rate_all_graphs)\n train_acc_all_graphs = np.mean(train_acc_all_graphs)\n train_precision_all_graphs = np.mean(train_precision_all_graphs)\n train_recall_all_graphs = np.mean(train_recall_all_graphs)\n train_f1_all_graphs = np.mean(train_f1_all_graphs)\n \n # Average over all the partitions in one epoch\n train_loss_epoch = np.mean(train_loss_epoch)\n train_fp_rate_epoch = np.mean(train_fp_rate_epoch)\n train_fn_rate_epoch = np.mean(train_fn_rate_epoch)\n train_acc_epoch = np.mean(train_acc_epoch)\n train_precision_epoch = np.mean(train_precision_epoch)\n train_recall_epoch = np.mean(train_recall_epoch)\n train_f1_epoch = np.mean(train_f1_epoch)\n \n train_acc_inv_epoch = np.mean(train_acc_inv_epoch)\n train_precision_inv_epoch = np.mean(train_precision_inv_epoch)\n train_recall_inv_epoch = np.mean(train_recall_inv_epoch)\n train_f1_inv_epoch = np.mean(train_f1_inv_epoch)\n\n loss_per_epoch_train.append(train_loss_epoch)\n lr_value = optimizer.param_groups[0]['lr']\n \n elapsed = utils.timedelta_to_str(datetime.now() - time_start)\n print(f'\\n==> TRAINING (all training graphs): Epoch = {epoch}')\n print(f'Loss: {train_loss_epoch:.4f}, fp_rate(GT=0): {train_fp_rate_epoch:.4f}, fn_rate(GT=1): {train_fn_rate_epoch:.4f}')\n print(f'Elapsed time: {elapsed}\\n\\n')\n\n if overfit:\n if len(loss_per_epoch_valid) == 1 or len(loss_per_epoch_train) > 1 and loss_per_epoch_train[-1] < min(loss_per_epoch_train[:-1]):\n torch.save(model.state_dict(), model_path)\n print(f'Epoch {epoch}: Model saved!')\n save_checkpoint(epoch, model, optimizer, loss_per_epoch_train[-1], 0.0, out, ckpt_path)\n scheduler.step(train_loss_all_graphs)\n wandb.log({'train_loss': train_loss_all_graphs, 'train_accuracy': train_acc_all_graphs, \\\n 'train_precision': train_precision_all_graphs, 'lr_value': lr_value, \\\n 'train_recall': train_recall_all_graphs, 'train_f1': train_f1_all_graphs, \\\n 'train_fp-rate': train_fp_rate_all_graphs, 'train_fn-rate': train_fn_rate_all_graphs})\n\n continue # This will entirely skip the validation\n\n val_loss_all_graphs, val_fp_rate_all_graphs, val_fn_rate_all_graphs = [], [], []\n val_acc_all_graphs, val_precision_all_graphs, val_recall_all_graphs, val_f1_all_graphs = [], [], [], []\n \n valid_loss_epoch, valid_fp_rate_epoch, valid_fn_rate_epoch = [], [], []\n valid_acc_epoch, valid_precision_epoch, valid_recall_epoch, valid_f1_epoch = [], [], [], []\n valid_acc_inv_epoch, valid_precision_inv_epoch, valid_recall_inv_epoch, valid_f1_inv_epoch = [], [], [], []\n valid_aps_epoch, valid_aps_inv_epoch = [], []\n\n with torch.no_grad():\n print('\\n===> VALIDATION\\n')\n time_start_eval = datetime.now()\n model.eval()\n for data in ds_valid:\n idx, g = data\n \n print(f'\\n(VALID Epoch = {epoch:3}) NEW GRAPH: index = {idx}')\n \n if masking:\n fraction = random.randint(mask_frac_low, mask_frac_high) / 100 # Fraction of nodes to be left in the graph (.85 -> ~30x, 1.0 -> 60x)\n g = mask_graph_strandwise(g, fraction, device)\n \n # Number of clusters dependant on graph size!\n num_nodes_per_cluster_min = int(num_nodes_per_cluster * npc_lower_bound)\n num_nodes_per_cluster_max = int(num_nodes_per_cluster * npc_upper_bound) + 1\n num_nodes_for_g = torch.LongTensor(1).random_(num_nodes_per_cluster_min, num_nodes_per_cluster_max).item() # DEBUG!!!\n num_clusters = g.num_nodes() // num_nodes_for_g + 1\n \n if num_nodes_for_g >= g.num_nodes(): # full graph\n print(f'\\nUse METIS: False')\n 
print(f'Use full graph')\n g = g.to(device)\n\n if use_symmetry_loss:\n x = g.ndata['x'].to(device)\n e = g.edata['e'].to(device)\n pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe = torch.cat((pe_in, pe_out), dim=1)\n org_scores = model(g, x, e, pe).squeeze(-1)\n edge_predictions = org_scores\n edge_labels = g.edata['y'].to(device)\n \n g = dgl.reverse(g, True, True)\n x = g.ndata['x'].to(device)\n e = g.edata['e'].to(device)\n pe_out = g.ndata['in_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe_in = g.ndata['out_deg'].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe = torch.cat((pe_in, pe_out), dim=1)\n rev_scores = model(g, x, e, pe).squeeze(-1)\n loss = symmetry_loss(org_scores, rev_scores, edge_labels, pos_weight, alpha=alpha) \n else:\n x = g.ndata['x'].to(device)\n e = g.edata['e'].to(device)\n pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe = torch.cat((pe_in, pe_out), dim=1)\n edge_predictions = model(g, x, e, pe)\n edge_predictions = edge_predictions.squeeze(-1)\n edge_labels = g.edata['y'].to(device)\n loss = criterion(edge_predictions, edge_labels)\n\n val_loss = loss.item()\n TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)\n acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)\n try:\n fp_rate = FP / (FP + TN)\n except ZeroDivisionError:\n fp_rate = 0.0\n try:\n fn_rate = FN / (FN + TP)\n except ZeroDivisionError:\n fn_rate = 0.0\n val_fp_rate = fp_rate\n val_fn_rate = fn_rate\n val_acc = acc\n val_precision = precision\n val_recall = recall\n val_f1 = f1\n \n valid_loss_epoch.append(loss.item())\n valid_fp_rate_epoch.append(fp_rate)\n valid_fn_rate_epoch.append(fn_rate)\n\n # elapsed = utils.timedelta_to_str(datetime.now() - time_start_eval)\n # print(f'\\nVALIDATION (one validation graph): Epoch = {epoch}, Graph = {idx}')\n # print(f'Loss: {val_loss:.4f}, fp_rate(GT=0): {val_fp_rate:.4f}, fn_rate(GT=1): {val_fn_rate:.4f}')\n # print(f'elapsed time: {elapsed}\\n\\n')\n\n else: # mini-batch\n print(f'\\nNum clusters:', num_clusters)\n g = g.long()\n d = dgl.metis_partition(g, num_clusters, extra_cached_hops=k_extra_hops)\n sub_gs = list(d.values())\n # g = g.to(device)\n\n # For loop over all mini-batch in the graph\n running_loss, running_fp_rate, running_fn_rate = [], [], []\n running_acc, running_precision, running_recall, running_f1 = [], [], [], []\n \n for sub_g in sub_gs:\n \n if use_symmetry_loss:\n sub_g = sub_g.to(device) \n x = g.ndata['x'][sub_g.ndata['_ID']].to(device)\n e = g.edata['e'][sub_g.edata['_ID']].to(device)\n pe_in = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe = torch.cat((pe_in, pe_out), dim=1)\n org_scores = model(sub_g, x, e, pe).squeeze(-1)\n labels = g.edata['y'][sub_g.edata['_ID']].to(device)\n\n sub_g = dgl.reverse(sub_g, True, True)\n x = g.ndata['x'][sub_g.ndata['_ID']].to(device)\n e = g.edata['e'][sub_g.edata['_ID']].to(device)\n pe_out = 
g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe_in = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device) # Reversed edges, in/out-deg also reversed\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe = torch.cat((pe_in, pe_out), dim=1)\n rev_scores = model(sub_g, x, e, pe).squeeze(-1)\n \n loss = symmetry_loss(org_scores, rev_scores, labels, pos_weight, alpha=alpha)\n edge_predictions = org_scores\n edge_labels = labels\n else:\n sub_g = sub_g.to(device)\n x = g.ndata['x'][sub_g.ndata['_ID']].to(device)\n e = g.edata['e'][sub_g.edata['_ID']].to(device)\n pe_in = g.ndata['in_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'][sub_g.ndata['_ID']].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe = torch.cat((pe_in, pe_out), dim=1)\n edge_predictions = model(sub_g, x, e, pe) \n edge_predictions = edge_predictions.squeeze(-1)\n \n edge_labels = g.edata['y'][sub_g.edata['_ID']].to(device)\n loss = criterion(edge_predictions, edge_labels)\n\n TP, TN, FP, FN = utils.calculate_tfpn(edge_predictions, edge_labels)\n acc, precision, recall, f1 = utils.calculate_metrics(TP, TN, FP, FN)\n acc_inv, precision_inv, recall_inv, f1_inv = utils.calculate_metrics_inverse(TP, TN, FP, FN)\n \n try:\n fp_rate = FP / (FP + TN)\n except ZeroDivisionError:\n fp_rate = 0.0\n try:\n fn_rate = FN / (FN + TP)\n except ZeroDivisionError:\n fn_rate = 0.0\n\n # Append results of a single mini-batch / METIS partition\n # These are used for epoch mean = mean over partitions over graphs - mostly DEPRECATED\n running_loss.append(loss.item())\n running_fp_rate.append(fp_rate)\n running_fn_rate.append(fn_rate)\n running_acc.append(acc)\n running_precision.append(precision)\n running_recall.append(recall)\n running_f1.append(f1)\n \n # These are used for epoch mean = mean over all the partitions in all the graphs\n valid_loss_epoch.append(loss.item())\n valid_fp_rate_epoch.append(fp_rate)\n valid_fn_rate_epoch.append(fn_rate)\n valid_acc_epoch.append(acc)\n valid_precision_epoch.append(precision)\n valid_recall_epoch.append(recall)\n valid_f1_epoch.append(f1)\n \n # Inverse metrics because F1 and them are not good for dataset with mostly positive labels\n valid_acc_inv_epoch.append(acc_inv)\n valid_precision_inv_epoch.append(precision_inv)\n valid_recall_inv_epoch.append(recall_inv)\n valid_f1_inv_epoch.append(f1_inv)\n\n # Average over all mini-batches (partitions) in a single graph - mostly DEPRECATED\n val_loss = np.mean(running_loss)\n val_fp_rate = np.mean(running_fp_rate)\n val_fn_rate = np.mean(running_fn_rate)\n val_acc = np.mean(running_acc)\n val_precision = np.mean(running_precision)\n val_recall = np.mean(running_recall)\n val_f1 = np.mean(running_f1)\n\n # elapsed = utils.timedelta_to_str(datetime.now() - time_start_eval)\n # print(f'\\nVALIDATION (one validation graph): Epoch = {epoch}, Graph = {idx}')\n # print(f'Loss: {val_loss:.4f}, fp_rate(GT=0): {val_fp_rate:.4f}, fn_rate(GT=1): {val_fn_rate:.4f}')\n # print(f'elapsed time: {elapsed}\\n\\n')\n\n # Record after each graph in the dataset - mostly DEPRECATED\n val_loss_all_graphs.append(val_loss)\n val_fp_rate_all_graphs.append(val_fp_rate)\n val_fn_rate_all_graphs.append(val_fn_rate)\n val_acc_all_graphs.append(val_acc)\n val_precision_all_graphs.append(val_precision)\n val_recall_all_graphs.append(val_recall)\n 
val_f1_all_graphs.append(val_f1)\n\n # Average over all the training graphs in one epoch - mostly DEPRECATED\n val_loss_all_graphs = np.mean(val_loss_all_graphs)\n val_fp_rate_all_graphs = np.mean(val_fp_rate_all_graphs)\n val_fn_rate_all_graphs = np.mean(val_fn_rate_all_graphs)\n val_acc_all_graphs = np.mean(val_acc_all_graphs)\n val_precision_all_graphs = np.mean(val_precision_all_graphs)\n val_recall_all_graphs = np.mean(val_recall_all_graphs)\n val_f1_all_graphs = np.mean(val_f1_all_graphs)\n \n # Average over all the partitions in one epoch\n valid_loss_epoch = np.mean(valid_loss_epoch)\n valid_fp_rate_epoch = np.mean(valid_fp_rate_epoch)\n valid_fn_rate_epoch = np.mean(valid_fn_rate_epoch)\n valid_acc_epoch = np.mean(valid_acc_epoch)\n valid_precision_epoch = np.mean(valid_precision_epoch)\n valid_recall_epoch = np.mean(valid_recall_epoch)\n valid_f1_epoch = np.mean(valid_f1_epoch)\n\n valid_acc_inv_epoch = np.mean(valid_acc_inv_epoch)\n valid_precision_inv_epoch = np.mean(valid_precision_inv_epoch)\n valid_recall_inv_epoch = np.mean(valid_recall_inv_epoch)\n valid_f1_inv_epoch = np.mean(valid_f1_inv_epoch)\n\n loss_per_epoch_valid.append(valid_loss_epoch)\n f1_inv_per_epoch_valid.append(valid_f1_inv_epoch)\n\n elapsed = utils.timedelta_to_str(datetime.now() - time_start)\n print(f'\\n==> VALIDATION (all validation graphs): Epoch = {epoch}')\n print(f'Loss: {valid_loss_epoch:.4f}, fp_rate(GT=0): {valid_fp_rate_epoch:.4f}, fn_rate(GT=1): {valid_fn_rate_epoch:.4f}')\n print(f'Elapsed time total: {elapsed}\\n\\n')\n\n if not overfit:\n # Choose the model with minimal loss on validation set\n if len(loss_per_epoch_valid) == 1 or len(loss_per_epoch_valid) > 1 and loss_per_epoch_valid[-1] < min(loss_per_epoch_valid[:-1]):\n torch.save(model.state_dict(), model_min_loss_path)\n print(f'Epoch {epoch:3}: Model MIN-LOSS saved! 
-> Val Loss = {valid_loss_epoch:.6f}\\tVal F1 = {valid_f1_epoch:.4f}\\tVal inv-F1 = {valid_f1_inv_epoch:.4f}' \\\n f'\\tVal FPR = {valid_fp_rate_epoch:.4f}\\tVal FNR = {valid_fn_rate_epoch:.4f}\\t')\n save_checkpoint(epoch, model, optimizer, min(loss_per_epoch_train), min(loss_per_epoch_valid), out, ckpt_path) # Save the checkpoint every epoch\n scheduler.step(valid_loss_epoch)\n\n # Code that evalates NGA50 during training -- only for overfitting\n plot_nga50_during_training = hyperparameters['plot_nga50_during_training']\n i = hyperparameters['chr_overfit']\n eval_frequency = hyperparameters['eval_frequency']\n if overfit and plot_nga50_during_training and (epoch+1) % eval_frequency == 0:\n # call inference\n refs_path = hyperparameters['refs_path']\n save_dir = os.path.join(train_path, assembler)\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n if not os.path.isdir(os.path.join(save_dir, f'assembly')):\n os.mkdir(os.path.join(save_dir, f'assembly'))\n if not os.path.isdir(os.path.join(save_dir, f'inference')):\n os.mkdir(os.path.join(save_dir, f'inference'))\n if not os.path.isdir(os.path.join(save_dir, f'reports')):\n os.mkdir(os.path.join(save_dir, f'reports'))\n inference(train_path, model_path, assembler, save_dir)\n # call evaluate\n ref = os.path.join(refs_path, 'chromosomes', f'chr{i}.fasta')\n idx = os.path.join(refs_path, 'indexed', f'chr{i}.fasta.fai')\n asm = os.path.join(save_dir, f'assembly', f'0_assembly.fasta')\n report = os.path.join(save_dir, f'reports', '0_minigraph.txt')\n paf = os.path.join(save_dir, f'asm.paf')\n p = evaluate.run_minigraph(ref, asm, paf)\n p.wait()\n p = evaluate.parse_pafs(idx, report, paf)\n p.wait()\n with open(report) as f:\n text = f.read()\n ng50 = int(re.findall(r'NG50\\s*(\\d+)', text)[0])\n nga50 = int(re.findall(r'NGA50\\s*(\\d+)', text)[0])\n print(f'NG50: {ng50}\\tNGA50: {nga50}')\n\n try:\n if 'nga50' in locals():\n wandb.log({'train_loss': train_loss_all_graphs, 'val_loss': val_loss_all_graphs, 'lr_value': lr_value, \\\n 'train_loss_aggr': train_loss_epoch, 'train_fpr_aggr': train_fp_rate_epoch, 'train_fnr_aggr': train_fn_rate_epoch, \\\n 'valid_loss_aggr': valid_loss_epoch, 'valid_fpr_aggr': valid_fp_rate_epoch, 'valid_fnr_aggr': valid_fn_rate_epoch, \\\n 'train_acc_aggr': train_acc_epoch, 'train_precision_aggr': train_precision_epoch, 'train_recall_aggr': train_recall_epoch, 'train_f1_aggr': train_f1_epoch, \\\n 'valid_acc_aggr': valid_acc_epoch, 'valid_precision_aggr': valid_precision_epoch, 'valid_recall_aggr': valid_recall_epoch, 'valid_f1_aggr': valid_f1_epoch, \\\n 'train_precision_inv_aggr': train_precision_inv_epoch, 'train_recall_inv_aggr': train_recall_inv_epoch, 'train_f1_inv_aggr': train_f1_inv_epoch, \\\n 'valid_precision_inv_aggr': valid_precision_inv_epoch, 'valid_recall_inv_aggr': valid_recall_inv_epoch, 'valid_f1_inv_aggr': valid_f1_inv_epoch, \\\n 'NG50': ng50, 'NGA50': nga50})\n else:\n wandb.log({'train_loss': train_loss_all_graphs, 'val_loss': val_loss_all_graphs, 'lr_value': lr_value, \\\n 'train_loss_aggr': train_loss_epoch, 'train_fpr_aggr': train_fp_rate_epoch, 'train_fnr_aggr': train_fn_rate_epoch, \\\n 'valid_loss_aggr': valid_loss_epoch, 'valid_fpr_aggr': valid_fp_rate_epoch, 'valid_fnr_aggr': valid_fn_rate_epoch, \\\n 'train_acc_aggr': train_acc_epoch, 'train_precision_aggr': train_precision_epoch, 'train_recall_aggr': train_recall_epoch, 'train_f1_aggr': train_f1_epoch, \\\n 'valid_acc_aggr': valid_acc_epoch, 'valid_precision_aggr': valid_precision_epoch, 'valid_recall_aggr': 
valid_recall_epoch, 'valid_f1_aggr': valid_f1_epoch, \\\n 'train_precision_inv_aggr': train_precision_inv_epoch, 'train_recall_inv_aggr': train_recall_inv_epoch, 'train_f1_inv_aggr': train_f1_inv_epoch, \\\n 'valid_precision_inv_aggr': valid_precision_inv_epoch, 'valid_recall_inv_aggr': valid_recall_inv_epoch, 'valid_f1_inv_aggr': valid_f1_inv_epoch})\n except Exception as e:\n print(f'WandB exception occured!')\n print(e)\n\n except KeyboardInterrupt:\n torch.cuda.empty_cache()\n print(\"Keyboard Interrupt...\")\n print(\"Exiting...\")\n\n finally:\n torch.cuda.empty_cache()" }, { "identifier": "inference", "path": "inference.py", "snippet": "def inference(data_path, model_path, assembler, savedir, device='cpu', dropout=None):\n \"\"\"Using a pretrained model, get walks and contigs on new data.\"\"\"\n hyperparameters = get_hyperparameters()\n seed = hyperparameters['seed']\n num_gnn_layers = hyperparameters['num_gnn_layers']\n hidden_features = hyperparameters['dim_latent']\n nb_pos_enc = hyperparameters['nb_pos_enc']\n\n batch_norm = hyperparameters['batch_norm']\n node_features = hyperparameters['node_features']\n edge_features = hyperparameters['edge_features']\n hidden_edge_features = hyperparameters['hidden_edge_features']\n hidden_edge_scores = hyperparameters['hidden_edge_scores']\n\n strategy = hyperparameters['strategy']\n B = hyperparameters['B']\n nb_paths = hyperparameters['num_decoding_paths']\n len_threshold = hyperparameters['len_threshold']\n use_labels = hyperparameters['decode_with_labels']\n load_checkpoint = hyperparameters['load_checkpoint']\n threads = hyperparameters['num_threads']\n\n # assembly_path = hyperparameters['asms_path']\n\n device = 'cpu' # Hardcode, because we cannot do inference on a GPU - usually not enough memory to load the whole graph\n utils.set_seed(seed)\n time_start = datetime.now()\n\n ds = AssemblyGraphDataset(data_path, assembler)\n\n inference_dir = os.path.join(savedir, 'decode')\n if not os.path.isdir(inference_dir):\n os.makedirs(inference_dir)\n\n checkpoint_dir = os.path.join(savedir, 'checkpoint')\n if not os.path.isdir(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n walks_per_graph = []\n contigs_per_graph = []\n\n elapsed = utils.timedelta_to_str(datetime.now() - time_start)\n print(f'\\nelapsed time (loading network and data): {elapsed}\\n')\n\n for idx, g in ds:\n # Get scores\n print(f'==== Processing graph {idx} ====')\n with torch.no_grad():\n time_start_get_scores = datetime.now()\n g = g.to(device)\n x = g.ndata['x'].to(device)\n e = g.edata['e'].to(device)\n pe_in = g.ndata['in_deg'].unsqueeze(1).to(device)\n pe_in = (pe_in - pe_in.mean()) / pe_in.std()\n pe_out = g.ndata['out_deg'].unsqueeze(1).to(device)\n pe_out = (pe_out - pe_out.mean()) / pe_out.std()\n pe = torch.cat((pe_in, pe_out), dim=1) # No PageRank\n \n if use_labels: # Debugging\n print('Decoding with labels...')\n g.edata['score'] = g.edata['y'].clone()\n else:\n print('Decoding with model scores...')\n predicts_path = os.path.join(inference_dir, f'{idx}_predicts.pt')\n if os.path.isfile(predicts_path):\n print(f'Loading the scores from:\\n{predicts_path}\\n')\n g.edata['score'] = torch.load(predicts_path)\n else:\n print(f'Loading model parameters from: {model_path}')\n model = models.SymGatedGCNModel(node_features, edge_features, hidden_features, hidden_edge_features, num_gnn_layers, hidden_edge_scores, batch_norm, nb_pos_enc, dropout=dropout)\n model.load_state_dict(torch.load(model_path, map_location=torch.device(device)))\n model.eval()\n 
model.to(device)\n print(f'Computing the scores with the model...\\n')\n edge_predictions = model(g, x, e, pe)\n g.edata['score'] = edge_predictions.squeeze()\n torch.save(g.edata['score'], os.path.join(inference_dir, f'{idx}_predicts.pt'))\n\n elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_scores)\n print(f'elapsed time (get_scores): {elapsed}')\n\n # Load info data\n print(f'Loading successors...')\n with open(f'{data_path}/{assembler}/info/{idx}_succ.pkl', 'rb') as f_succs:\n succs = pickle.load(f_succs)\n print(f'Loading predecessors...')\n with open(f'{data_path}/{assembler}/info/{idx}_pred.pkl', 'rb') as f_preds:\n preds = pickle.load(f_preds)\n print(f'Loading edges...')\n with open(f'{data_path}/{assembler}/info/{idx}_edges.pkl', 'rb') as f_edges:\n edges = pickle.load(f_edges)\n print(f'Done loading the auxiliary graph data!')\n\n # Get walks\n time_start_get_walks = datetime.now()\n \n # Some prefixes can be <0 and that messes up the assemblies\n g.edata['prefix_length'] = g.edata['prefix_length'].masked_fill(g.edata['prefix_length']<0, 0)\n \n if strategy == 'greedy':\n walks = get_contigs_greedy(g, succs, preds, edges, nb_paths, len_threshold, use_labels, checkpoint_dir, load_checkpoint, device='cpu', threads=threads)\n else:\n print('Invalid decoding strategy')\n raise Exception\n \n elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_walks)\n print(f'elapsed time (get_walks): {elapsed}')\n inference_path = os.path.join(inference_dir, f'{idx}_walks.pkl')\n pickle.dump(walks, open(f'{inference_path}', 'wb'))\n \n print(f'Loading reads...')\n with open(f'{data_path}/{assembler}/info/{idx}_reads.pkl', 'rb') as f_reads:\n reads = pickle.load(f_reads)\n print(f'Done!')\n \n time_start_get_contigs = datetime.now()\n contigs = evaluate.walk_to_sequence(walks, g, reads, edges)\n elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_contigs)\n print(f'elapsed time (get_contigs): {elapsed}')\n\n assembly_dir = os.path.join(savedir, f'assembly')\n if not os.path.isdir(assembly_dir):\n os.makedirs(assembly_dir)\n evaluate.save_assembly(contigs, assembly_dir, idx)\n walks_per_graph.append(walks)\n contigs_per_graph.append(contigs)\n\n elapsed = utils.timedelta_to_str(datetime.now() - time_start)\n print(f'elapsed time (total): {elapsed}')\n \n if DEBUG:\n exit(0)\n\n print(f'Found contigs for {data_path}!')\n print(f'Model used: {model_path}')\n print(f'Assembly saved in: {savedir}')" } ]
import argparse import gzip import os import re import pickle import subprocess import time import torch import requests import graph_dataset import evaluate import train_valid_chrs import hyperparameters from datetime import datetime from tqdm import tqdm from Bio import SeqIO, AlignIO from train import train from inference import inference
14681
def evaluate_genome(eval_path, assembler, model_path, asm_path, ref_path, genome, save_dir): real_path = os.path.join(eval_path, 'real') save_dir = os.path.join(asm_path, 'real', assembler, save_dir) print(f'New genome') chr_path = os.path.join(real_path, genome) save_path = os.path.join(save_dir, genome) if not os.path.isdir(save_path): os.makedirs(save_path) os.mkdir(os.path.join(save_path, f'assembly')) os.mkdir(os.path.join(save_path, f'decode')) os.mkdir(os.path.join(save_path, f'reports')) inference(chr_path, model_path, assembler, save_path) ref = ref_path idx = ref_path + '.fai' asm = os.path.join(save_path, f'assembly', f'0_assembly.fasta') report = os.path.join(save_path, f'reports', '0_minigraph.txt') paf = os.path.join(save_path, f'asm.paf') p = evaluate.run_minigraph(ref, asm, paf) p.wait() p = evaluate.parse_pafs(idx, report, paf) p.wait() evaluate.parse_minigraph_for_full(save_dir) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--out', type=str, default=None, help='Output name for models') parser.add_argument('--overfit', action='store_true', default=False, help='Overfit on the chromosomes in the train directory') args = parser.parse_args() out = args.out overfit = args.overfit hyperparams = hyperparameters.get_hyperparameters() data_path = hyperparams['data_path'] # Location of the master database (storage) temp_path = hyperparams['temp_path'] # Location where the data will be temporarily stored for training eval_path = hyperparams['eval_path'] # Location where the synth and real evaluation data is stored refs_path = hyperparams['refs_path'] # Location where the references are stored - local because everythin else can be generated from this asms_path = hyperparams['asms_path'] # Where the assemblies and other inference info will be stored assembler = hyperparams['assembler'] # Which assembler we are using, currently: raven/hifiasm models_path = hyperparams['models_path'] threads = hyperparams['num_threads'] # dataset = hyperparams['dataset'] # Which dataset we are using, currently it's only chm13 data_path_ont = hyperparams['data_path_ont'] eval_path_ont = hyperparams['eval_path_ont'] initials = hyperparams['initials'] time_start = datetime.now() if out is None: timestamp = time_start.strftime('%Y-%b-%d-%H-%M-%S') out = f'{timestamp}_{initials}' else: timestamp = time_start.strftime('%y-%m-%d') out = f'{timestamp}_{initials}_{out}' # Model name must start with the date when the model was trained, in the yy-mm-dd format # Following is the underscore and a name of the model # E.g., 22-10-31_modelA # All the info about the training (train/valid data, hyperparameters, etc.) should be stored in the logbook # You can also include them in the model name, but they NEED to be stored in the logbook! 
model_name = out # In the inference, model_name represents the model used for evaluation # All the inference data (predictions, walks, assemblies, and reports) # Will be stored in a directory with name {model_name}_{decoding} # Suffix should indicate info about the decoding strategy = hyperparams['strategy'] B = hyperparams['B'] num_decoding_paths = hyperparams['num_decoding_paths'] if strategy == 'greedy': save_dir = f'{model_name}_Gx{num_decoding_paths}' elif strategy == 'beamsearch': save_dir = f'{model_name}_B{B}x{num_decoding_paths}' dicts = train_valid_chrs.get_config() train_dict = dicts['train_dict'] valid_dict = dicts['valid_dict'] test_dict = dicts['test_dict'] train_dict_ont = dicts['train_dict_ont'] valid_dict_ont = dicts['valid_dict_ont'] test_dict_ont = {} specs = { 'threads': threads, 'filter': 0.99, 'out': 'assembly.fasta', 'assembler': assembler, } torch.set_num_threads(threads) model_path = os.path.join(models_path, f'model_{model_name}.pt') all_chr = merge_dicts(train_dict, valid_dict, test_dict) all_chr_ont = merge_dicts(train_dict_ont, valid_dict_ont) # file_structure_setup(data_path, refs_path) # download_reference(refs_path) # simulate_reads_hifi(data_path, refs_path, all_chr, assembler) # simulate_reads_combo(data_path, refs_path, all_chr, assembler) # generate_graphs_hifi(data_path, all_chr, assembler) # simulate_reads_ont(data_path_ont, refs_path, all_chr_ont, 'raven') # generate_graphs_ont(data_path_ont, all_chr_ont, 'raven') # exit(0) if overfit: train_path, valid_path, test_path = train_valid_split(data_path, eval_path, temp_path, assembler, train_dict, valid_dict, test_dict, out, overfit=True)
#### DEPRECATED #### def change_description(file_path): new_fasta = [] for record in SeqIO.parse(file_path, file_path[-5:]): # 'fasta' for FASTA file, 'fastq' for FASTQ file des = record.description.split(",") id = des[0][5:] if des[1] == "forward": strand = '+' else: strand = '-' position = des[2][9:].split("-") start = position[0] end = position[1] record.id = id record.description = f'strand={strand} start={start} end={end}' new_fasta.append(record) SeqIO.write(new_fasta, file_path, "fasta") def change_description2(fastq_path, maf_path, chr): chr = int(chr[3:]) reads = {r.id: r for r in SeqIO.parse(fastq_path, 'fastq')} # print(len(reads)) # counter = 0 for align in AlignIO.parse(maf_path, 'maf'): ref, read_m = align start = ref.annotations['start'] end = start + ref.annotations['size'] strand = '+' if read_m.annotations['strand'] == 1 else '-' description = f'strand={strand} start={start} end={end} chr={chr}' reads[read_m.id].id += f'_chr{chr}' reads[read_m.id].name += f'_chr{chr}' reads[read_m.id].description = description # counter += 1 # print(counter) fasta_path = fastq_path[:-1] + 'a' SeqIO.write(list(reads.values()), fasta_path, 'fasta') os.remove(fastq_path) return fasta_path def create_chr_dirs(pth): for i in range(1, 24): if i == 23: i = 'X' subprocess.run(f'mkdir chr{i}', shell=True, cwd=pth) subprocess.run(f'mkdir raw raven hifiasm', shell=True, cwd=os.path.join(pth, f'chr{i}')) subprocess.run(f'mkdir processed info output graphia', shell=True, cwd=os.path.join(pth, f'chr{i}/raven')) subprocess.run(f'mkdir processed info output graphia', shell=True, cwd=os.path.join(pth, f'chr{i}/hifiasm')) def merge_dicts(d1, d2, d3={}): keys = {*d1, *d2, *d3} merged = {key: d1.get(key, 0) + d2.get(key, 0) + d3.get(key, 0) for key in keys} return merged # -1. Set up the data file structure def file_structure_setup(data_path, refs_path): # TODO: Do something with this! return print(f'SETUP::filesystem:: Create directories for storing data') if not os.path.isdir(data_path): os.makedirs(data_path) if 'CHM13' not in os.listdir(refs_path): os.mkdir(os.path.join(refs_path, 'CHM13')) if 'chromosomes' not in os.listdir(refs_path): os.mkdir(os.path.join(refs_path, 'chromosomes')) if 'simulated_hifi' not in os.listdir(data_path): os.mkdir(os.path.join(data_path, 'simulated_hifi')) create_chr_dirs(os.path.join(data_path, 'simulated_hifi')) if 'simulated_ont' not in os.listdir(data_path): os.mkdir(os.path.join(data_path, 'simulated_ont')) create_chr_dirs(os.path.join(data_path, 'simulated_ont')) # if 'real' not in os.listdir(data_path): # subprocess.run(f'bash download_dataset.sh {data_path}', shell=True) # os.mkdir(os.path.join(data_path, 'real')) # create_chr_dirs(os.path.join(data_path, 'real')) if 'experiments' not in os.listdir(data_path): os.mkdir(os.path.join(data_path, 'experiments')) # 0. Download the CHM13 if necessary def download_reference(refs_path): chm_path = os.path.join(refs_path, 'CHM13') chr_path = os.path.join(refs_path, 'chromosomes') chm13_url = 'https://s3-us-west-2.amazonaws.com/human-pangenomics/T2T/CHM13/assemblies/chm13.draft_v1.1.fasta.gz' chm13_path = os.path.join(chm_path, 'chm13.draft_v1.1.fasta.gz') if len(os.listdir(chm_path)) == 0: # Download the CHM13 reference # Code for tqdm from: https://stackoverflow.com/questions/37573483/progress-bar-while-download-file-over-http-with-requests print(f'SETUP::download:: CHM13 not found! 
Downloading...') response = requests.get(chm13_url, stream=True) total_size_in_bytes= int(response.headers.get('content-length', 0)) block_size = 1024 #1 Kibibyte progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True) with open(chm13_path, 'wb') as file: for data in response.iter_content(block_size): progress_bar.update(len(data)) file.write(data) progress_bar.close() if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes: print("ERROR, something went wrong") if len(os.listdir(chr_path)) == 0: # Parse the CHM13 into individual chromosomes print(f'SETUP::download:: Split CHM13 per chromosome') with gzip.open(chm13_path, 'rt') as f: for record in SeqIO.parse(f, 'fasta'): SeqIO.write(record, os.path.join(chr_path, f'{record.id}.fasta'), 'fasta') def handle_pbsim_output(idx, chrN, chr_raw_path, combo=False): if combo == True: idx = chrN subprocess.run(f'mv {idx}_0001.fastq {idx}.fastq', shell=True, cwd=chr_raw_path) subprocess.run(f'mv {idx}_0001.maf {idx}.maf', shell=True, cwd=chr_raw_path) subprocess.run(f'rm {idx}_0001.ref', shell=True, cwd=chr_raw_path) fastq_path = os.path.join(chr_raw_path, f'{idx}.fastq') maf_path = os.path.join(chr_raw_path, f'{idx}.maf') print(f'Adding positions for training...') fasta_path = change_description2(fastq_path, maf_path, chr=chrN) # Extract positional info from the MAF file print(f'Removing the MAF file...') subprocess.run(f'rm {idx}.maf', shell=True, cwd=chr_raw_path) if combo: return fasta_path else: return None # 1. Simulate the sequences - HiFi def simulate_reads_hifi(data_path, refs_path, chr_dict, assembler): print(f'SETUP::simulate') if 'vendor' not in os.listdir(): os.mkdir('vendor') pbsim3_dir = f'/home/vrcekl/pbsim3' data_path = os.path.abspath(data_path) for chrN_flag, n_need in chr_dict.items(): if chrN_flag.endswith('_r'): continue if '+' in chrN_flag: continue # elif chrN_flag.endswith('_chm13'): # chrN = chrN_flag[:-6] # chr_path = os.path.join(refs_path, 'CHM13', 'chromosomes') # pbsim_path = os.path.join(data_path, 'chm13_pbsim3') # chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') # sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/CHM13/chm13_subsampled.fastq' # sample_profile_id = f'chm13' # depth = 60 # elif chrN_flag.endswith('_ncbr'): # chrN = chrN_flag[:-5] # chr_path = os.path.join(refs_path, 'ncoibor', 'chromosomes') # pbsim_path = os.path.join(data_path, 'ncoibor_pbsim3') # chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') # sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/CHM13/chm13_subsampled.fastq' # sample_profile_id = f'chm13' # depth = 60 elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_path = os.path.join(refs_path, f'HG002', 'hg002_chromosomes') # TODO: redefine refs path pbsim_path = os.path.join(data_path, 'hg002_pbsim3') # TODO: redefine data path chr_seq_path = os.path.join(chr_path, f'{chrN}_MATERNAL.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/HG002/20kb/m64011_190830_220126.sub.fastq' # TODO: Need to provide this as an argument sample_profile_id = f'20kb-m64011_190830_220126' # TODO: Need to provide this as an argument depth = 60 else: print('Give valid suffix!') raise Exception chr_raw_path = os.path.join(pbsim_path, f'{chrN}/raw') chr_processed_path = os.path.join(pbsim_path, f'{chrN}/{assembler}/processed') if not os.path.isdir(chr_raw_path): os.makedirs(chr_raw_path) if not os.path.isdir(chr_processed_path): os.makedirs(chr_processed_path) # TODO: Fix so that you can delete raw 
files raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(chr_raw_path)} prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(chr_processed_path)} all_files = raw_files | prc_files n_have = max(all_files) + 1 if all_files else 0 if n_need <= n_have: continue n_diff = n_need - n_have print(f'SETUP::simulate:: Simulate {n_diff} datasets for {chrN_flag} with PBSIM3') for i in range(n_diff): idx = n_have + i chr_save_path = os.path.join(chr_raw_path, f'{idx}.fasta') print(f'\nStep {i}: Simulating reads {chr_save_path}') # Use the CHM13/HG002 profile for all the chromosomes if f'sample_profile_{sample_profile_id}.fastq' not in os.listdir(pbsim3_dir): subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample {sample_file_path} ' f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{idx}', shell=True, cwd=pbsim3_dir) else: subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{idx}', shell=True, cwd=pbsim3_dir) handle_pbsim_output(idx, chrN, chr_raw_path) def simulate_reads_combo(data_path, refs_path, chr_dict, assembler): data_path = os.path.abspath(data_path) pbsim_path = os.path.join(data_path, 'combo') pbsim3_dir = hyperparameters.get_hyperparameters()['pbsim3_dir'] if len(pbsim3_dir) == 0: pbsim3_dir = 'pbsim3' for chrN_combo, n_need in chr_dict.items(): if '+' not in chrN_combo: continue chromosomes = chrN_combo.split('+') # chr1_chm13+chr2_chm13+chr3_chm13 chr_raw_path = os.path.join(pbsim_path, f'{chrN_combo}/raw') if not os.path.isdir(chr_raw_path): os.makedirs(chr_raw_path) chr_processed_path = os.path.join(pbsim_path, f'{chrN_combo}/{assembler}/processed') # TODO: Fix so that you can delete raw files if not os.path.isdir(chr_processed_path): os.makedirs(chr_processed_path) raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(chr_raw_path)} prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(chr_processed_path)} all_files = raw_files | prc_files if all_files: n_have = max(all_files) + 1 else: n_have = 0 if n_need <= n_have: continue else: n_diff = n_need - n_have print(f'SETUP::simulate:: Simulate {n_diff} datasets for {chrN_combo} with PBSIM3') # Simulate reads for chrN_combo n_diff times for i in range(n_diff): idx = n_have + i all_reads = [] for chromosome in chromosomes: if chromosome.endswith('_chm13'): chrN = chromosome[:-6] chr_path = os.path.join(refs_path, 'CHM13', 'chromosomes') chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') chr_save_path = os.path.join(chr_raw_path, f'{chrN}.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/CHM13/chm13_subsampled.fastq' sample_profile_id = f'chm13' depth = 30 elif chromosome.endswith('_ncbr'): chrN = chromosome[:-5] chr_path = os.path.join(refs_path, 'ncoibor', 'chromosomes') chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') chr_save_path = os.path.join(chr_raw_path, f'{chrN}.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/CHM13/chm13_subsampled.fastq' sample_profile_id = f'chm13' depth = 30 elif chromosome.endswith('_hg002'): chrN = chromosome[:-6] chr_path = os.path.join(refs_path, 'HG002', 'hg002_chromosomes') chr_seq_path = os.path.join(chr_path, f'{chrN}_MATERNAL.fasta') chr_save_path = os.path.join(chr_raw_path, f'{chrN}.fasta') sample_file_path = 
f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data/HG002/20kb/m64011_190830_220126.sub.fastq' sample_profile_id = f'20kb-m64011_190830_220126' depth = 30 print(f'\nStep {i}: Simulating reads {chr_save_path}') if f'sample_profile_{sample_profile_id}.fastq' not in os.listdir(pbsim3_dir): subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample {sample_file_path} ' f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{chrN}', shell=True, cwd=pbsim3_dir) else: subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{chrN}', shell=True, cwd=pbsim3_dir) fasta_path = handle_pbsim_output(idx, chrN, chr_raw_path, combo=True) # Because it's combo we pass chrN instead of idx! We get idx.fasta later # Combining individual chromosome FASTAs into a unified list print(f'Appending the list of all the reads with {chromosome}...', end='\t') all_reads.extend(list(SeqIO.parse(fasta_path, 'fasta'))) subprocess.run(f'rm {fasta_path}', shell=True) print(f'Done!') # Saving the unified FASTA file as idx.fasta all_reads_path = os.path.join(chr_raw_path, f'{idx}.fasta') SeqIO.write(all_reads, all_reads_path, 'fasta') def simulate_reads_ont(data_path, refs_path, chr_dict, assembler='raven'): print(f'SETUP::simulate') if 'vendor' not in os.listdir(): os.mkdir('vendor') pbsim3_dir = f'/home/vrcekl/pbsim3' data_path = os.path.abspath(data_path) for chrN_flag, n_need in chr_dict.items(): if chrN_flag.endswith('_r'): # Training on real reads continue if '+' in chrN_flag: # Training on combined synthetic chromosomes continue elif chrN_flag.endswith('_ncbr'): # Training on synthetic ncoibor chromosomes continue elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] # chrN_chm13 chr_path = os.path.join(refs_path, 'CHM13', 'chromosomes') pbsim_path = os.path.join(data_path, 'chm13_pbsim3') chr_seq_path = os.path.join(chr_path, f'{chrN}.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data-DELETE/CHM13/ONT/chm13_ont-subsampled_2M_trimmed.fastq' sample_profile_id = f'chm13-ont' depth = 120 elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] # chrN_hg002 chr_path = os.path.join(refs_path, 'HG002', 'hg002_chromosomes') pbsim_path = os.path.join(data_path, 'hg002_pbsim3') chr_seq_path = os.path.join(chr_path, f'{chrN}_MATERNAL.fasta') sample_file_path = f'/mnt/sod2-project/csb4/wgs/lovro/sequencing_data-DELETE/CHM13/ONT/chm13_ont-subsampled_2M_trimmed.fastq' sample_profile_id = f'chm13-ont' depth = 120 else: print(f'Chromosome suffix incorrect!') raise Exception chr_raw_path = os.path.join(pbsim_path, f'{chrN}/raw') chr_processed_path = os.path.join(pbsim_path, f'{chrN}/{assembler}/processed') if not os.path.isdir(chr_raw_path): os.makedirs(chr_raw_path) if not os.path.isdir(chr_processed_path): os.makedirs(chr_processed_path) # TODO: Fix so that you can delete raw files raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(chr_raw_path)} prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(chr_processed_path)} all_files = raw_files | prc_files n_have = max(all_files) + 1 if all_files else 0 if n_need <= n_have: continue n_diff = n_need - n_have print(f'SETUP::simulate:: Simulate {n_diff} datasets for {chrN} with PBSIM3') for i in range(n_diff): idx = n_have + i chr_save_path = os.path.join(chr_raw_path, f'{idx}.fasta') print(f'\nStep {i}: Simulating reads {chr_save_path}') if 
f'sample_profile_{sample_profile_id}.fastq' in os.listdir(pbsim3_dir): # Use the CHM13 profile for all the chromosomes subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{idx}', shell=True, cwd=pbsim3_dir) else: subprocess.run(f'./src/pbsim --strategy wgs --method sample --depth {depth} --genome {chr_seq_path} ' \ f'--sample {sample_file_path} ' \ f'--sample-profile-id {sample_profile_id} --prefix {chr_raw_path}/{idx}', shell=True, cwd=pbsim3_dir) handle_pbsim_output(idx, chrN, chr_raw_path) # 2. Generate the graphs def generate_graphs_hifi(data_path, chr_dict, assembler): print(f'SETUP::generate') if 'raven' not in os.listdir('vendor'): print(f'SETUP::generate:: Download Raven') subprocess.run(f'git clone -b print_graphs https://github.com/lbcb-sci/raven', shell=True, cwd='vendor') subprocess.run(f'cmake -S ./ -B./build -DRAVEN_BUILD_EXE=1 -DCMAKE_BUILD_TYPE=Release', shell=True, cwd='vendor/raven') subprocess.run(f'cmake --build build', shell=True, cwd='vendor/raven') data_path = os.path.abspath(data_path) for chrN_flag, n_need in chr_dict.items(): if '+' in chrN_flag: chrN = chrN_flag # Called chrN_combo in simulate_reads_combo function chr_sim_path = os.path.join(data_path, 'combo', f'{chrN}') elif chrN_flag.endswith('_r'): chrN = chrN_flag[:-2] chr_sim_path = os.path.join(data_path, 'real', f'{chrN}') elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(data_path, 'chm13_pbsim3', f'{chrN}') elif chrN_flag.endswith('_ncbr'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(data_path, 'ncoibor_pbsim3', f'{chrN}') elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(data_path, 'hg002_pbsim3', f'{chrN}') else: print(f'Give valid suffix') raise Exception chr_raw_path = os.path.join(chr_sim_path, 'raw') chr_prc_path = os.path.join(chr_sim_path, f'{assembler}/processed') n_raw = len(os.listdir(chr_raw_path)) n_prc = len(os.listdir(chr_prc_path)) n_diff = max(0, n_raw - n_prc) print(f'SETUP::generate:: Generate {n_diff} graphs for {chrN}') specs = { 'threads': 32, 'filter': 0.99, 'out': 'assembly.fasta', 'assembler': assembler, } graph_dataset.AssemblyGraphDataset_HiFi(chr_sim_path, nb_pos_enc=None, assembler=assembler, specs=specs, generate=True) def generate_graphs_ont(data_path, chr_dict, assembler): print(f'SETUP::generate') if 'raven' not in os.listdir('vendor'): print(f'SETUP::generate:: Download Raven') subprocess.run(f'git clone -b print_graphs https://github.com/lbcb-sci/raven', shell=True, cwd='vendor') subprocess.run(f'cmake -S ./ -B./build -DRAVEN_BUILD_EXE=1 -DCMAKE_BUILD_TYPE=Release', shell=True, cwd='vendor/raven') subprocess.run(f'cmake --build build', shell=True, cwd='vendor/raven') data_path = os.path.abspath(data_path) for chrN_flag, n_need in chr_dict.items(): if '+' in chrN_flag: chrN = chrN_flag # Called chrN_combo in simulate_reads_combo function chr_sim_path = os.path.join(data_path, 'combo', f'{chrN}') elif chrN_flag.endswith('_r'): chrN = chrN_flag[:-2] chr_sim_path = os.path.join(data_path, 'real', f'{chrN}') elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(data_path, 'chm13_pbsim3', f'{chrN}') elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(data_path, 'hg002_pbsim3', f'{chrN}') else: print(f'Chromosome suffix incorrect!') raise Exception chr_raw_path = os.path.join(chr_sim_path, 'raw') chr_prc_path = 
os.path.join(chr_sim_path, f'{assembler}/processed') n_raw = len(os.listdir(chr_raw_path)) n_prc = len(os.listdir(chr_prc_path)) n_diff = max(0, n_raw - n_prc) print(f'SETUP::generate:: Generate {n_diff} graphs for {chrN}') specs = { 'threads': 32, 'filter': 0.99, 'out': 'assembly.fasta', 'assembler': 'raven', } graph_dataset.AssemblyGraphDataset_ONT(chr_sim_path, nb_pos_enc=None, assembler='raven', specs=specs, generate=True) # 2.5 Train-valid-test split def train_valid_split(data_path, eval_path, temp_path, assembler, train_dict, valid_dict, test_dict={}, out=None, overfit=False): print(f'SETUP::split') data_path = os.path.abspath(data_path) eval_path = os.path.abspath(eval_path) if overfit: data_path = eval_path real_path = os.path.join(eval_path, 'real') # pbsim_path = os.path.join(data_path, 'pbsim3') ncoibor_path = os.path.join(data_path, 'ncoibor_pbsim3') hg002_path = os.path.join(data_path, 'hg002_pbsim3') combo_path = os.path.join(data_path, 'combo') chm13_path = os.path.join(data_path, 'chm13_pbsim3') arab_path = os.path.join(data_path, 'arabidopsis_pbsim3') zmays_path = os.path.join(data_path, 'zmays_Mo17_pbsim3') exp_path = temp_path if out is None: train_path = os.path.join(exp_path, f'train', assembler) valid_path = os.path.join(exp_path, f'valid', assembler) test_path = os.path.join(exp_path, f'test', assembler) else: train_path = os.path.join(exp_path, f'train_{out}', assembler) valid_path = os.path.join(exp_path, f'valid_{out}', assembler) test_path = os.path.join(exp_path, f'test_{out}', assembler) if not os.path.isdir(train_path): os.makedirs(train_path) subprocess.run(f'mkdir processed info', shell=True, cwd=train_path) if not os.path.isdir(valid_path): os.makedirs(valid_path) subprocess.run(f'mkdir processed info', shell=True, cwd=valid_path) if not os.path.isdir(test_path) and len(test_dict) > 0: os.makedirs(test_path) subprocess.run(f'mkdir processed info', shell=True, cwd=test_path) train_g_to_chr = {} # Remember chromosomes for each graph in the dataset train_g_to_org_g = {} # Remember index of the graph in the master dataset for each graph in this dataset n_have = 0 if assembler == 'both': assemblers = ['hifiasm', 'raven'] else: assemblers = [assembler] for assembler in assemblers: for chrN_flag, n_need in train_dict.items(): # copy n_need datasets from chrN into train dict if '_r' in chrN_flag and n_need > 1: print(f'SETUP::split::WARNING Cannot copy more than one graph for real data: {chrN_flag}') n_need = 1 print(f'SETUP::split:: Copying {n_need} graphs of {chrN_flag} - {assembler} into {train_path}') for i in range(n_need): if '+' in chrN_flag: chrN = chrN_flag chr_sim_path = os.path.join(combo_path, chrN, assembler) elif chrN_flag.endswith('_r'): chrN = chrN_flag[:-2] chr_sim_path = os.path.join(real_path, 'chm13_chromosomes', chrN, assembler) # elif chrN_flag.endswith('_pbs'): # chrN = chrN_flag[:-4] # chr_sim_path = os.path.join(pbsim_path, chrN, assembler) elif chrN_flag.endswith('_ncbr'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(ncoibor_path, chrN, assembler) elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(hg002_path, chrN, assembler) elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(chm13_path, chrN, assembler) elif chrN_flag.endswith('_arab'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(arab_path, chrN, assembler) elif chrN_flag.endswith('_zmays'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(zmays_path, chrN, assembler) else: print(f'Give proper suffix!') raise 
Exception train_g_to_chr[n_have] = chrN print(f'Copying {chr_sim_path}/processed/{i}.dgl into {train_path}/processed/{n_have}.dgl') subprocess.run(f'cp {chr_sim_path}/processed/{i}.dgl {train_path}/processed/{n_have}.dgl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{i}_succ.pkl {train_path}/info/{n_have}_succ.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{i}_pred.pkl {train_path}/info/{n_have}_pred.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{i}_edges.pkl {train_path}/info/{n_have}_edges.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{i}_reads.pkl {train_path}/info/{n_have}_reads.pkl', shell=True) train_g_to_org_g[n_have] = i n_have += 1 pickle.dump(train_g_to_chr, open(f'{train_path}/info/g_to_chr.pkl', 'wb')) pickle.dump(train_g_to_org_g, open(f'{train_path}/info/g_to_org_g.pkl', 'wb')) valid_g_to_chr = {} valid_g_to_org_g = {} n_have = 0 for assembler in assemblers: for chrN_flag, n_need in valid_dict.items(): # copy n_need datasets from chrN into train dict if '_r' in chrN_flag and n_need > 1: print(f'SETUP::split::WARNING Cannot copy more than one graph for real data: {chrN_flag}') n_need = 1 print(f'SETUP::split:: Copying {n_need} graphs of {chrN_flag} - {assembler} into {valid_path}') for i in range(n_need): if '+' in chrN_flag: chrN = chrN_flag chr_sim_path = os.path.join(combo_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_r'): chrN = chrN_flag[:-2] chr_sim_path = os.path.join(real_path, 'chm13_chromosomes', chrN, assembler) j = 0 # elif chrN_flag.endswith('_pbs'): # chrN = chrN_flag[:-4] # chr_sim_path = os.path.join(pbsim_path, chrN, assembler) # j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_ncbr'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(ncoibor_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_hg002'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(hg002_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_chm13'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(chm13_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_arab'): chrN = chrN_flag[:-5] chr_sim_path = os.path.join(arab_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) elif chrN_flag.endswith('_zmays'): chrN = chrN_flag[:-6] chr_sim_path = os.path.join(zmays_path, chrN, assembler) j = i + train_dict.get(chrN_flag, 0) else: print(f'Give proper suffix!') raise Exception valid_g_to_chr[n_have] = chrN print(f'Copying {chr_sim_path}/processed/{j}.dgl into {valid_path}/processed/{n_have}.dgl') subprocess.run(f'cp {chr_sim_path}/processed/{j}.dgl {valid_path}/processed/{n_have}.dgl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{j}_succ.pkl {valid_path}/info/{n_have}_succ.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{j}_pred.pkl {valid_path}/info/{n_have}_pred.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{j}_edges.pkl {valid_path}/info/{n_have}_edges.pkl', shell=True) # subprocess.run(f'cp {chr_sim_path}/info/{j}_reads.pkl {valid_path}/info/{n_have}_reads.pkl', shell=True) valid_g_to_org_g[n_have] = j n_have += 1 pickle.dump(valid_g_to_chr, open(f'{valid_path}/info/g_to_chr.pkl', 'wb')) pickle.dump(valid_g_to_org_g, open(f'{valid_path}/info/g_to_org_g.pkl', 'wb')) # TODO: FIX THIS !!!!!!!!!!!!!!!!!! 
train_path = os.path.join(train_path, os.path.pardir) valid_path = os.path.join(valid_path, os.path.pardir) test_path = os.path.join(test_path, os.path.pardir) ################################### return train_path, valid_path, test_path # def predict_baselines(test_path, assembler, out, model_path=None, device='cpu'): # if model_path is None: # model_path = os.path.abspath(f'pretrained/model_{out}.pt') # walks_and_contigs = inference_baselines(test_path, model_path, assembler, device) # walks_per_graph, contigs_per_graph = walks_and_contigs[0], walks_and_contigs[1] # walks_per_graph_ol_len, contigs_per_graph_ol_len = walks_and_contigs[2], walks_and_contigs[3] # walks_per_graph_ol_sim, contigs_per_graph_ol_sim = walks_and_contigs[4], walks_and_contigs[5] # g_to_chr = pickle.load(open(f'{test_path}/info/g_to_chr.pkl', 'rb')) # for idx, (contigs, contigs_ol_len, contigs_ol_sim) in enumerate(zip(contigs_per_graph, contigs_per_graph_ol_len, contigs_per_graph_ol_sim)): # chrN = g_to_chr[idx] # print(f'GNN: Scores') # num_contigs, longest_contig, reconstructed, n50, ng50 = evaluate.quick_evaluation(contigs, chrN) # evaluate.print_summary(test_path, idx, chrN, num_contigs, longest_contig, reconstructed, n50, ng50) # print(f'Baseline: Overlap lengths') # num_contigs, longest_contig, reconstructed, n50, ng50 = evaluate.quick_evaluation(contigs_ol_len, chrN) # evaluate.print_summary(test_path, idx, chrN, num_contigs, longest_contig, reconstructed, n50, ng50) # print(f'Baseline: Overlap similarities') # num_contigs, longest_contig, reconstructed, n50, ng50 = evaluate.quick_evaluation(contigs_ol_sim, chrN) # evaluate.print_summary(test_path, idx, chrN, num_contigs, longest_contig, reconstructed, n50, ng50) def cleanup(train_path, valid_path): subprocess.run(f'rm -rf {train_path}', shell=True) subprocess.run(f'rm -rf {valid_path}', shell=True) def evaluate_real(eval_path, assembler, model_path, asm_path, ref_path, save_dir): real_path = os.path.join(eval_path, 'real') save_dir = os.path.join(asm_path, 'real', assembler, save_dir) procs = [] for i in range(1, 24): if i == 23: i = 'X' print(f'\nChromosome {i}') chr_path = os.path.join(real_path, f'chr{i}') save_path = os.path.join(save_dir, f'chr{i}') if not os.path.isdir(save_path): os.makedirs(save_path) os.mkdir(os.path.join(save_path, f'assembly')) os.mkdir(os.path.join(save_path, f'decode')) os.mkdir(os.path.join(save_path, f'reports')) inference(chr_path, model_path, assembler, save_path) ref = os.path.join(ref_path, 'CHM13', 'chromosomes', f'chr{i}.fasta') idx = os.path.join(ref_path, 'CHM13', 'chromosomes', 'indexed', f'chr{i}.fasta.fai') asm = os.path.join(save_path, f'assembly', f'0_assembly.fasta') report = os.path.join(save_path, f'reports', '0_minigraph.txt') paf = os.path.join(save_path, f'asm.paf') p = evaluate.run_minigraph(ref, asm, paf) procs.append(p) for p in procs: p.wait() procs = [] for i in range(1, 24): if i == 23: i = 'X' save_path = os.path.join(save_dir, f'chr{i}') idx = os.path.join(ref_path, 'indexed', f'chr{i}.fasta.fai') paf = os.path.join(save_path, f'asm.paf') report = os.path.join(save_path, f'reports', '0_minigraph.txt') p = evaluate.parse_pafs(idx, report, paf) procs.append(p) for p in procs: p.wait() evaluate.parse_minigraph_for_chrs(save_dir) def evaluate_synth(eval_path, assembler, model_path, asm_path, ref_path, save_dir): synth_path = os.path.join(eval_path, 'synth') save_dir = os.path.join(asm_path, 'synth', assembler, save_dir) procs = [] for i in range(1, 24): if i == 23: i = 'X' print(f'\nChromosome {i}') 
chr_path = os.path.join(synth_path, f'chr{i}') save_path = os.path.join(save_dir, f'chr{i}') if not os.path.isdir(save_path): os.makedirs(save_path) os.mkdir(os.path.join(save_path, f'assembly')) os.mkdir(os.path.join(save_path, f'decode')) os.mkdir(os.path.join(save_path, f'reports')) inference(chr_path, model_path, assembler, save_path) ref = os.path.join(ref_path, 'CHM13', 'chromosomes', f'chr{i}.fasta') idx = os.path.join(ref_path, 'CHM13', 'chromosomes', 'indexed', f'chr{i}.fasta.fai') asm = os.path.join(save_path, f'assembly', f'0_assembly.fasta') report = os.path.join(save_path, f'reports', '0_minigraph.txt') paf = os.path.join(save_path, f'asm.paf') p = evaluate.run_minigraph(ref, asm, paf) procs.append(p) for p in procs: p.wait() procs = [] for i in range(1, 24): if i == 23: i = 'X' save_path = os.path.join(save_dir, f'chr{i}') idx = os.path.join(ref_path, 'indexed', f'chr{i}.fasta.fai') paf = os.path.join(save_path, f'asm.paf') report = os.path.join(save_path, f'reports', '0_minigraph.txt') p = evaluate.parse_pafs(idx, report, paf) procs.append(p) for p in procs: p.wait() evaluate.parse_minigraph_for_chrs(save_dir) def evaluate_genome(eval_path, assembler, model_path, asm_path, ref_path, genome, save_dir): real_path = os.path.join(eval_path, 'real') save_dir = os.path.join(asm_path, 'real', assembler, save_dir) print(f'New genome') chr_path = os.path.join(real_path, genome) save_path = os.path.join(save_dir, genome) if not os.path.isdir(save_path): os.makedirs(save_path) os.mkdir(os.path.join(save_path, f'assembly')) os.mkdir(os.path.join(save_path, f'decode')) os.mkdir(os.path.join(save_path, f'reports')) inference(chr_path, model_path, assembler, save_path) ref = ref_path idx = ref_path + '.fai' asm = os.path.join(save_path, f'assembly', f'0_assembly.fasta') report = os.path.join(save_path, f'reports', '0_minigraph.txt') paf = os.path.join(save_path, f'asm.paf') p = evaluate.run_minigraph(ref, asm, paf) p.wait() p = evaluate.parse_pafs(idx, report, paf) p.wait() evaluate.parse_minigraph_for_full(save_dir) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--out', type=str, default=None, help='Output name for models') parser.add_argument('--overfit', action='store_true', default=False, help='Overfit on the chromosomes in the train directory') args = parser.parse_args() out = args.out overfit = args.overfit hyperparams = hyperparameters.get_hyperparameters() data_path = hyperparams['data_path'] # Location of the master database (storage) temp_path = hyperparams['temp_path'] # Location where the data will be temporarily stored for training eval_path = hyperparams['eval_path'] # Location where the synth and real evaluation data is stored refs_path = hyperparams['refs_path'] # Location where the references are stored - local because everythin else can be generated from this asms_path = hyperparams['asms_path'] # Where the assemblies and other inference info will be stored assembler = hyperparams['assembler'] # Which assembler we are using, currently: raven/hifiasm models_path = hyperparams['models_path'] threads = hyperparams['num_threads'] # dataset = hyperparams['dataset'] # Which dataset we are using, currently it's only chm13 data_path_ont = hyperparams['data_path_ont'] eval_path_ont = hyperparams['eval_path_ont'] initials = hyperparams['initials'] time_start = datetime.now() if out is None: timestamp = time_start.strftime('%Y-%b-%d-%H-%M-%S') out = f'{timestamp}_{initials}' else: timestamp = time_start.strftime('%y-%m-%d') out = 
f'{timestamp}_{initials}_{out}' # Model name must start with the date when the model was trained, in the yy-mm-dd format # Following is the underscore and a name of the model # E.g., 22-10-31_modelA # All the info about the training (train/valid data, hyperparameters, etc.) should be stored in the logbook # You can also include them in the model name, but they NEED to be stored in the logbook! model_name = out # In the inference, model_name represents the model used for evaluation # All the inference data (predictions, walks, assemblies, and reports) # Will be stored in a directory with name {model_name}_{decoding} # Suffix should indicate info about the decoding strategy = hyperparams['strategy'] B = hyperparams['B'] num_decoding_paths = hyperparams['num_decoding_paths'] if strategy == 'greedy': save_dir = f'{model_name}_Gx{num_decoding_paths}' elif strategy == 'beamsearch': save_dir = f'{model_name}_B{B}x{num_decoding_paths}' dicts = train_valid_chrs.get_config() train_dict = dicts['train_dict'] valid_dict = dicts['valid_dict'] test_dict = dicts['test_dict'] train_dict_ont = dicts['train_dict_ont'] valid_dict_ont = dicts['valid_dict_ont'] test_dict_ont = {} specs = { 'threads': threads, 'filter': 0.99, 'out': 'assembly.fasta', 'assembler': assembler, } torch.set_num_threads(threads) model_path = os.path.join(models_path, f'model_{model_name}.pt') all_chr = merge_dicts(train_dict, valid_dict, test_dict) all_chr_ont = merge_dicts(train_dict_ont, valid_dict_ont) # file_structure_setup(data_path, refs_path) # download_reference(refs_path) # simulate_reads_hifi(data_path, refs_path, all_chr, assembler) # simulate_reads_combo(data_path, refs_path, all_chr, assembler) # generate_graphs_hifi(data_path, all_chr, assembler) # simulate_reads_ont(data_path_ont, refs_path, all_chr_ont, 'raven') # generate_graphs_ont(data_path_ont, all_chr_ont, 'raven') # exit(0) if overfit: train_path, valid_path, test_path = train_valid_split(data_path, eval_path, temp_path, assembler, train_dict, valid_dict, test_dict, out, overfit=True)
train(train_path, valid_path, out, assembler, overfit)
0
2023-12-08 04:45:45+00:00
24k
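The simulation functions in the record above (simulate_reads_hifi, simulate_reads_combo, simulate_reads_ont) all share the same resume logic: the dataset indices that already exist are inferred from the raw {idx}.fast* files and the processed {idx}.dgl graphs, and only the missing indices are simulated with PBSIM3. A minimal standalone sketch of that pattern is shown below; the helper names are hypothetical and are not part of the repository.

import os
import re

def count_existing_datasets(raw_dir: str, processed_dir: str) -> int:
    """Return how many dataset indices already exist on disk.

    An index counts as existing if either its raw read file ({idx}.fasta or
    {idx}.fastq) or its processed graph ({idx}.dgl) is present, so raw files
    can be deleted after processing without triggering re-simulation.
    """
    raw_ids = {int(m.group(1)) for f in os.listdir(raw_dir)
               if (m := re.match(r'(\d+)\.fast', f))}
    prc_ids = {int(m.group(1)) for f in os.listdir(processed_dir)
               if (m := re.match(r'(\d+)\.dgl', f))}
    all_ids = raw_ids | prc_ids
    return max(all_ids) + 1 if all_ids else 0

def missing_indices(raw_dir: str, processed_dir: str, n_need: int) -> range:
    """Indices that still need to be simulated to reach n_need datasets."""
    n_have = count_existing_datasets(raw_dir, processed_dir)
    return range(n_have, max(n_need, n_have))

Note that n_have is taken as max(existing index) + 1 rather than the number of files, which keeps the numbering stable even when intermediate files have been removed.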
ZS-YANG/FemtoDet-v3
mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py
[ { "identifier": "BBoxHead", "path": "mmdet/models/roi_heads/bbox_heads/bbox_head.py", "snippet": "class BBoxHead(BaseModule):\n \"\"\"Simplest RoI head, with only two fc layers for classification and\n regression respectively.\"\"\"\n\n def __init__(self,\n with_avg_pool: bool = False,\n with_cls: bool = True,\n with_reg: bool = True,\n roi_feat_size: int = 7,\n in_channels: int = 256,\n num_classes: int = 80,\n bbox_coder: ConfigType = dict(\n type='DeltaXYWHBBoxCoder',\n clip_border=True,\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n predict_box_type: str = 'hbox',\n reg_class_agnostic: bool = False,\n reg_decoded_bbox: bool = False,\n reg_predictor_cfg: ConfigType = dict(type='Linear'),\n cls_predictor_cfg: ConfigType = dict(type='Linear'),\n loss_cls: ConfigType = dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n loss_bbox: ConfigType = dict(\n type='SmoothL1Loss', beta=1.0, loss_weight=1.0),\n init_cfg: OptMultiConfig = None) -> None:\n super().__init__(init_cfg=init_cfg)\n assert with_cls or with_reg\n self.with_avg_pool = with_avg_pool\n self.with_cls = with_cls\n self.with_reg = with_reg\n self.roi_feat_size = _pair(roi_feat_size)\n self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]\n self.in_channels = in_channels\n self.num_classes = num_classes\n self.predict_box_type = predict_box_type\n self.reg_class_agnostic = reg_class_agnostic\n self.reg_decoded_bbox = reg_decoded_bbox\n self.reg_predictor_cfg = reg_predictor_cfg\n self.cls_predictor_cfg = cls_predictor_cfg\n\n self.bbox_coder = TASK_UTILS.build(bbox_coder)\n self.loss_cls = MODELS.build(loss_cls)\n self.loss_bbox = MODELS.build(loss_bbox)\n\n in_channels = self.in_channels\n if self.with_avg_pool:\n self.avg_pool = nn.AvgPool2d(self.roi_feat_size)\n else:\n in_channels *= self.roi_feat_area\n if self.with_cls:\n # need to add background class\n if self.custom_cls_channels:\n cls_channels = self.loss_cls.get_cls_channels(self.num_classes)\n else:\n cls_channels = num_classes + 1\n cls_predictor_cfg_ = self.cls_predictor_cfg.copy()\n cls_predictor_cfg_.update(\n in_features=in_channels, out_features=cls_channels)\n self.fc_cls = MODELS.build(cls_predictor_cfg_)\n if self.with_reg:\n box_dim = self.bbox_coder.encode_size\n out_dim_reg = box_dim if reg_class_agnostic else \\\n box_dim * num_classes\n reg_predictor_cfg_ = self.reg_predictor_cfg.copy()\n if isinstance(reg_predictor_cfg_, (dict, ConfigDict)):\n reg_predictor_cfg_.update(\n in_features=in_channels, out_features=out_dim_reg)\n self.fc_reg = MODELS.build(reg_predictor_cfg_)\n self.debug_imgs = None\n if init_cfg is None:\n self.init_cfg = []\n if self.with_cls:\n self.init_cfg += [\n dict(\n type='Normal', std=0.01, override=dict(name='fc_cls'))\n ]\n if self.with_reg:\n self.init_cfg += [\n dict(\n type='Normal', std=0.001, override=dict(name='fc_reg'))\n ]\n\n # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n @property\n def custom_cls_channels(self) -> bool:\n \"\"\"get custom_cls_channels from loss_cls.\"\"\"\n return getattr(self.loss_cls, 'custom_cls_channels', False)\n\n # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n @property\n def custom_activation(self) -> bool:\n \"\"\"get custom_activation from loss_cls.\"\"\"\n return getattr(self.loss_cls, 'custom_activation', False)\n\n # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n @property\n def custom_accuracy(self) -> bool:\n \"\"\"get custom_accuracy from loss_cls.\"\"\"\n return 
getattr(self.loss_cls, 'custom_accuracy', False)\n\n def forward(self, x: Tuple[Tensor]) -> tuple:\n \"\"\"Forward features from the upstream network.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: A tuple of classification scores and bbox prediction.\n\n - cls_score (Tensor): Classification scores for all\n scale levels, each is a 4D-tensor, the channels number\n is num_base_priors * num_classes.\n - bbox_pred (Tensor): Box energies / deltas for all\n scale levels, each is a 4D-tensor, the channels number\n is num_base_priors * 4.\n \"\"\"\n if self.with_avg_pool:\n if x.numel() > 0:\n x = self.avg_pool(x)\n x = x.view(x.size(0), -1)\n else:\n # avg_pool does not support empty tensor,\n # so use torch.mean instead it\n x = torch.mean(x, dim=(-1, -2))\n cls_score = self.fc_cls(x) if self.with_cls else None\n bbox_pred = self.fc_reg(x) if self.with_reg else None\n return cls_score, bbox_pred\n\n def _get_targets_single(self, pos_priors: Tensor, neg_priors: Tensor,\n pos_gt_bboxes: Tensor, pos_gt_labels: Tensor,\n cfg: ConfigDict) -> tuple:\n \"\"\"Calculate the ground truth for proposals in the single image\n according to the sampling results.\n\n Args:\n pos_priors (Tensor): Contains all the positive boxes,\n has shape (num_pos, 4), the last dimension 4\n represents [tl_x, tl_y, br_x, br_y].\n neg_priors (Tensor): Contains all the negative boxes,\n has shape (num_neg, 4), the last dimension 4\n represents [tl_x, tl_y, br_x, br_y].\n pos_gt_bboxes (Tensor): Contains gt_boxes for\n all positive samples, has shape (num_pos, 4),\n the last dimension 4\n represents [tl_x, tl_y, br_x, br_y].\n pos_gt_labels (Tensor): Contains gt_labels for\n all positive samples, has shape (num_pos, ).\n cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.\n\n Returns:\n Tuple[Tensor]: Ground truth for proposals\n in a single image. Containing the following Tensors:\n\n - labels(Tensor): Gt_labels for all proposals, has\n shape (num_proposals,).\n - label_weights(Tensor): Labels_weights for all\n proposals, has shape (num_proposals,).\n - bbox_targets(Tensor):Regression target for all\n proposals, has shape (num_proposals, 4), the\n last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n - bbox_weights(Tensor):Regression weights for all\n proposals, has shape (num_proposals, 4).\n \"\"\"\n num_pos = pos_priors.size(0)\n num_neg = neg_priors.size(0)\n num_samples = num_pos + num_neg\n\n # original implementation uses new_zeros since BG are set to be 0\n # now use empty & fill because BG cat_id = num_classes,\n # FG cat_id = [0, num_classes-1]\n labels = pos_priors.new_full((num_samples, ),\n self.num_classes,\n dtype=torch.long)\n reg_dim = pos_gt_bboxes.size(-1) if self.reg_decoded_bbox \\\n else self.bbox_coder.encode_size\n label_weights = pos_priors.new_zeros(num_samples)\n bbox_targets = pos_priors.new_zeros(num_samples, reg_dim)\n bbox_weights = pos_priors.new_zeros(num_samples, reg_dim)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n if not self.reg_decoded_bbox:\n pos_bbox_targets = self.bbox_coder.encode(\n pos_priors, pos_gt_bboxes)\n else:\n # When the regression loss (e.g. 
`IouLoss`, `GIouLoss`)\n # is applied directly on the decoded bounding boxes, both\n # the predicted boxes and regression targets should be with\n # absolute coordinate format.\n pos_bbox_targets = get_box_tensor(pos_gt_bboxes)\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n\n return labels, label_weights, bbox_targets, bbox_weights\n\n def get_targets(self,\n sampling_results: List[SamplingResult],\n rcnn_train_cfg: ConfigDict,\n concat: bool = True) -> tuple:\n \"\"\"Calculate the ground truth for all samples in a batch according to\n the sampling_results.\n\n Almost the same as the implementation in bbox_head, we passed\n additional parameters pos_inds_list and neg_inds_list to\n `_get_targets_single` function.\n\n Args:\n sampling_results (List[obj:SamplingResult]): Assign results of\n all images in a batch after sampling.\n rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n concat (bool): Whether to concatenate the results of all\n the images in a single batch.\n\n Returns:\n Tuple[Tensor]: Ground truth for proposals in a single image.\n Containing the following list of Tensors:\n\n - labels (list[Tensor],Tensor): Gt_labels for all\n proposals in a batch, each tensor in list has\n shape (num_proposals,) when `concat=False`, otherwise\n just a single tensor has shape (num_all_proposals,).\n - label_weights (list[Tensor]): Labels_weights for\n all proposals in a batch, each tensor in list has\n shape (num_proposals,) when `concat=False`, otherwise\n just a single tensor has shape (num_all_proposals,).\n - bbox_targets (list[Tensor],Tensor): Regression target\n for all proposals in a batch, each tensor in list\n has shape (num_proposals, 4) when `concat=False`,\n otherwise just a single tensor has shape\n (num_all_proposals, 4), the last dimension 4 represents\n [tl_x, tl_y, br_x, br_y].\n - bbox_weights (list[tensor],Tensor): Regression weights for\n all proposals in a batch, each tensor in list has shape\n (num_proposals, 4) when `concat=False`, otherwise just a\n single tensor has shape (num_all_proposals, 4).\n \"\"\"\n pos_priors_list = [res.pos_priors for res in sampling_results]\n neg_priors_list = [res.neg_priors for res in sampling_results]\n pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]\n pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]\n labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n self._get_targets_single,\n pos_priors_list,\n neg_priors_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg=rcnn_train_cfg)\n\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n return labels, label_weights, bbox_targets, bbox_weights\n\n def loss_and_target(self,\n cls_score: Tensor,\n bbox_pred: Tensor,\n rois: Tensor,\n sampling_results: List[SamplingResult],\n rcnn_train_cfg: ConfigDict,\n concat: bool = True,\n reduction_override: Optional[str] = None) -> dict:\n \"\"\"Calculate the loss based on the features extracted by the bbox head.\n\n Args:\n cls_score (Tensor): Classification prediction\n results of all class, has shape\n (batch_size * num_proposals_single_image, num_classes)\n bbox_pred (Tensor): Regression prediction results,\n has shape\n (batch_size * num_proposals_single_image, 4), the last\n dimension 4 represents [tl_x, tl_y, br_x, br_y].\n rois (Tensor): RoIs with the shape\n (batch_size * 
num_proposals_single_image, 5) where the first\n column indicates batch id of each RoI.\n sampling_results (List[obj:SamplingResult]): Assign results of\n all images in a batch after sampling.\n rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n concat (bool): Whether to concatenate the results of all\n the images in a single batch. Defaults to True.\n reduction_override (str, optional): The reduction\n method used to override the original reduction\n method of the loss. Options are \"none\",\n \"mean\" and \"sum\". Defaults to None,\n\n Returns:\n dict: A dictionary of loss and targets components.\n The targets are only used for cascade rcnn.\n \"\"\"\n\n cls_reg_targets = self.get_targets(\n sampling_results, rcnn_train_cfg, concat=concat)\n losses = self.loss(\n cls_score,\n bbox_pred,\n rois,\n *cls_reg_targets,\n reduction_override=reduction_override)\n\n # cls_reg_targets is only for cascade rcnn\n return dict(loss_bbox=losses, bbox_targets=cls_reg_targets)\n\n def loss(self,\n cls_score: Tensor,\n bbox_pred: Tensor,\n rois: Tensor,\n labels: Tensor,\n label_weights: Tensor,\n bbox_targets: Tensor,\n bbox_weights: Tensor,\n reduction_override: Optional[str] = None) -> dict:\n \"\"\"Calculate the loss based on the network predictions and targets.\n\n Args:\n cls_score (Tensor): Classification prediction\n results of all class, has shape\n (batch_size * num_proposals_single_image, num_classes)\n bbox_pred (Tensor): Regression prediction results,\n has shape\n (batch_size * num_proposals_single_image, 4), the last\n dimension 4 represents [tl_x, tl_y, br_x, br_y].\n rois (Tensor): RoIs with the shape\n (batch_size * num_proposals_single_image, 5) where the first\n column indicates batch id of each RoI.\n labels (Tensor): Gt_labels for all proposals in a batch, has\n shape (batch_size * num_proposals_single_image, ).\n label_weights (Tensor): Labels_weights for all proposals in a\n batch, has shape (batch_size * num_proposals_single_image, ).\n bbox_targets (Tensor): Regression target for all proposals in a\n batch, has shape (batch_size * num_proposals_single_image, 4),\n the last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n bbox_weights (Tensor): Regression weights for all proposals in a\n batch, has shape (batch_size * num_proposals_single_image, 4).\n reduction_override (str, optional): The reduction\n method used to override the original reduction\n method of the loss. Options are \"none\",\n \"mean\" and \"sum\". Defaults to None,\n\n Returns:\n dict: A dictionary of loss.\n \"\"\"\n\n losses = dict()\n\n if cls_score is not None:\n avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n if cls_score.numel() > 0:\n loss_cls_ = self.loss_cls(\n cls_score,\n labels,\n label_weights,\n avg_factor=avg_factor,\n reduction_override=reduction_override)\n if isinstance(loss_cls_, dict):\n losses.update(loss_cls_)\n else:\n losses['loss_cls'] = loss_cls_\n if self.custom_activation:\n acc_ = self.loss_cls.get_accuracy(cls_score, labels)\n losses.update(acc_)\n else:\n losses['acc'] = accuracy(cls_score, labels)\n if bbox_pred is not None:\n bg_class_ind = self.num_classes\n # 0~self.num_classes-1 are FG, self.num_classes is BG\n pos_inds = (labels >= 0) & (labels < bg_class_ind)\n # do not perform bounding box regression for BG anymore.\n if pos_inds.any():\n if self.reg_decoded_bbox:\n # When the regression loss (e.g. 
`IouLoss`,\n # `GIouLoss`, `DIouLoss`) is applied directly on\n # the decoded bounding boxes, it decodes the\n # already encoded coordinates to absolute format.\n bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)\n bbox_pred = get_box_tensor(bbox_pred)\n if self.reg_class_agnostic:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), -1)[pos_inds.type(torch.bool)]\n else:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), self.num_classes,\n -1)[pos_inds.type(torch.bool),\n labels[pos_inds.type(torch.bool)]]\n losses['loss_bbox'] = self.loss_bbox(\n pos_bbox_pred,\n bbox_targets[pos_inds.type(torch.bool)],\n bbox_weights[pos_inds.type(torch.bool)],\n avg_factor=bbox_targets.size(0),\n reduction_override=reduction_override)\n else:\n losses['loss_bbox'] = bbox_pred[pos_inds].sum()\n\n return losses\n\n def predict_by_feat(self,\n rois: Tuple[Tensor],\n cls_scores: Tuple[Tensor],\n bbox_preds: Tuple[Tensor],\n batch_img_metas: List[dict],\n rcnn_test_cfg: Optional[ConfigDict] = None,\n rescale: bool = False) -> InstanceList:\n \"\"\"Transform a batch of output features extracted from the head into\n bbox results.\n\n Args:\n rois (tuple[Tensor]): Tuple of boxes to be transformed.\n Each has shape (num_boxes, 5). last dimension 5 arrange as\n (batch_index, x1, y1, x2, y2).\n cls_scores (tuple[Tensor]): Tuple of box scores, each has shape\n (num_boxes, num_classes + 1).\n bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each\n has shape (num_boxes, num_classes * 4).\n batch_img_metas (list[dict]): List of image information.\n rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.\n Defaults to None.\n rescale (bool): If True, return boxes in original image space.\n Defaults to False.\n\n Returns:\n list[:obj:`InstanceData`]: Instance segmentation\n results of each image after the post process.\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance, )\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances, ).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n \"\"\"\n assert len(cls_scores) == len(bbox_preds)\n result_list = []\n for img_id in range(len(batch_img_metas)):\n img_meta = batch_img_metas[img_id]\n results = self._predict_by_feat_single(\n roi=rois[img_id],\n cls_score=cls_scores[img_id],\n bbox_pred=bbox_preds[img_id],\n img_meta=img_meta,\n rescale=rescale,\n rcnn_test_cfg=rcnn_test_cfg)\n result_list.append(results)\n\n return result_list\n\n def _predict_by_feat_single(\n self,\n roi: Tensor,\n cls_score: Tensor,\n bbox_pred: Tensor,\n img_meta: dict,\n rescale: bool = False,\n rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:\n \"\"\"Transform a single image's features extracted from the head into\n bbox results.\n\n Args:\n roi (Tensor): Boxes to be transformed. 
Has shape (num_boxes, 5).\n last dimension 5 arrange as (batch_index, x1, y1, x2, y2).\n cls_score (Tensor): Box scores, has shape\n (num_boxes, num_classes + 1).\n bbox_pred (Tensor): Box energies / deltas.\n has shape (num_boxes, num_classes * 4).\n img_meta (dict): image information.\n rescale (bool): If True, return boxes in original image space.\n Defaults to False.\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.\n Defaults to None\n\n Returns:\n :obj:`InstanceData`: Detection results of each image\\\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance, )\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances, ).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n \"\"\"\n results = InstanceData()\n if roi.shape[0] == 0:\n return empty_instances([img_meta],\n roi.device,\n task_type='bbox',\n instance_results=[results],\n box_type=self.predict_box_type,\n use_box_type=False,\n num_classes=self.num_classes,\n score_per_cls=rcnn_test_cfg is None)[0]\n\n # some loss (Seesaw loss..) may have custom activation\n if self.custom_cls_channels:\n scores = self.loss_cls.get_activation(cls_score)\n else:\n scores = F.softmax(\n cls_score, dim=-1) if cls_score is not None else None\n\n img_shape = img_meta['img_shape']\n num_rois = roi.size(0)\n # bbox_pred would be None in some detector when with_reg is False,\n # e.g. Grid R-CNN.\n if bbox_pred is not None:\n num_classes = 1 if self.reg_class_agnostic else self.num_classes\n roi = roi.repeat_interleave(num_classes, dim=0)\n bbox_pred = bbox_pred.view(-1, self.bbox_coder.encode_size)\n bboxes = self.bbox_coder.decode(\n roi[..., 1:], bbox_pred, max_shape=img_shape)\n else:\n bboxes = roi[:, 1:].clone()\n if img_shape is not None and bboxes.size(-1) == 4:\n bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])\n bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])\n\n if rescale and bboxes.size(0) > 0:\n assert img_meta.get('scale_factor') is not None\n scale_factor = [1 / s for s in img_meta['scale_factor']]\n bboxes = scale_boxes(bboxes, scale_factor)\n\n # Get the inside tensor when `bboxes` is a box type\n bboxes = get_box_tensor(bboxes)\n box_dim = bboxes.size(-1)\n bboxes = bboxes.view(num_rois, -1)\n\n if rcnn_test_cfg is None:\n # This means that it is aug test.\n # It needs to return the raw results without nms.\n results.bboxes = bboxes\n results.scores = scores\n else:\n det_bboxes, det_labels = multiclass_nms(\n bboxes,\n scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img,\n box_dim=box_dim)\n results.bboxes = det_bboxes[:, :-1]\n results.scores = det_bboxes[:, -1]\n results.labels = det_labels\n return results\n\n def refine_bboxes(self, sampling_results: Union[List[SamplingResult],\n InstanceList],\n bbox_results: dict,\n batch_img_metas: List[dict]) -> InstanceList:\n \"\"\"Refine bboxes during training.\n\n Args:\n sampling_results (List[:obj:`SamplingResult`] or\n List[:obj:`InstanceData`]): Sampling results.\n :obj:`SamplingResult` is the real sampling results\n calculate from bbox_head, while :obj:`InstanceData` is\n fake sampling results, e.g., in Sparse R-CNN or QueryInst, etc.\n bbox_results (dict): Usually is a dictionary with keys:\n\n - `cls_score` (Tensor): Classification scores.\n - `bbox_pred` (Tensor): Box energies / deltas.\n - `rois` (Tensor): RoIs with the shape (n, 5) where the first\n column indicates batch id of each RoI.\n - `bbox_targets` 
(tuple): Ground truth for proposals in a\n single image. Containing the following list of Tensors:\n (labels, label_weights, bbox_targets, bbox_weights)\n batch_img_metas (List[dict]): List of image information.\n\n Returns:\n list[:obj:`InstanceData`]: Refined bboxes of each image.\n\n Example:\n >>> # xdoctest: +REQUIRES(module:kwarray)\n >>> import numpy as np\n >>> from mmdet.models.task_modules.samplers.\n ... sampling_result import random_boxes\n >>> from mmdet.models.task_modules.samplers import SamplingResult\n >>> self = BBoxHead(reg_class_agnostic=True)\n >>> n_roi = 2\n >>> n_img = 4\n >>> scale = 512\n >>> rng = np.random.RandomState(0)\n ... batch_img_metas = [{'img_shape': (scale, scale)}\n >>> for _ in range(n_img)]\n >>> sampling_results = [SamplingResult.random(rng=10)\n ... for _ in range(n_img)]\n >>> # Create rois in the expected format\n >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)\n >>> img_ids = torch.randint(0, n_img, (n_roi,))\n >>> img_ids = img_ids.float()\n >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)\n >>> # Create other args\n >>> labels = torch.randint(0, 81, (scale,)).long()\n >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)\n >>> cls_score = torch.randn((scale, 81))\n ... # For each image, pretend random positive boxes are gts\n >>> bbox_targets = (labels, None, None, None)\n ... bbox_results = dict(rois=rois, bbox_pred=bbox_preds,\n ... cls_score=cls_score,\n ... bbox_targets=bbox_targets)\n >>> bboxes_list = self.refine_bboxes(sampling_results,\n ... bbox_results,\n ... batch_img_metas)\n >>> print(bboxes_list)\n \"\"\"\n pos_is_gts = [res.pos_is_gt for res in sampling_results]\n # bbox_targets is a tuple\n labels = bbox_results['bbox_targets'][0]\n cls_scores = bbox_results['cls_score']\n rois = bbox_results['rois']\n bbox_preds = bbox_results['bbox_pred']\n if self.custom_activation:\n # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n cls_scores = self.loss_cls.get_activation(cls_scores)\n if cls_scores.numel() == 0:\n return None\n if cls_scores.shape[-1] == self.num_classes + 1:\n # remove background class\n cls_scores = cls_scores[:, :-1]\n elif cls_scores.shape[-1] != self.num_classes:\n raise ValueError('The last dim of `cls_scores` should equal to '\n '`num_classes` or `num_classes + 1`,'\n f'but got {cls_scores.shape[-1]}.')\n labels = torch.where(labels == self.num_classes, cls_scores.argmax(1),\n labels)\n\n img_ids = rois[:, 0].long().unique(sorted=True)\n assert img_ids.numel() <= len(batch_img_metas)\n\n results_list = []\n for i in range(len(batch_img_metas)):\n inds = torch.nonzero(\n rois[:, 0] == i, as_tuple=False).squeeze(dim=1)\n num_rois = inds.numel()\n\n bboxes_ = rois[inds, 1:]\n label_ = labels[inds]\n bbox_pred_ = bbox_preds[inds]\n img_meta_ = batch_img_metas[i]\n pos_is_gts_ = pos_is_gts[i]\n\n bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,\n img_meta_)\n # filter gt bboxes\n pos_keep = 1 - pos_is_gts_\n keep_inds = pos_is_gts_.new_ones(num_rois)\n keep_inds[:len(pos_is_gts_)] = pos_keep\n results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)])\n results_list.append(results)\n\n return results_list\n\n def regress_by_class(self, priors: Tensor, label: Tensor,\n bbox_pred: Tensor, img_meta: dict) -> Tensor:\n \"\"\"Regress the bbox for the predicted class. 
Used in Cascade R-CNN.\n\n Args:\n priors (Tensor): Priors from `rpn_head` or last stage\n `bbox_head`, has shape (num_proposals, 4).\n label (Tensor): Only used when `self.reg_class_agnostic`\n is False, has shape (num_proposals, ).\n bbox_pred (Tensor): Regression prediction of\n current stage `bbox_head`. When `self.reg_class_agnostic`\n is False, it has shape (n, num_classes * 4), otherwise\n it has shape (n, 4).\n img_meta (dict): Image meta info.\n\n Returns:\n Tensor: Regressed bboxes, the same shape as input rois.\n \"\"\"\n reg_dim = self.bbox_coder.encode_size\n if not self.reg_class_agnostic:\n label = label * reg_dim\n inds = torch.stack([label + i for i in range(reg_dim)], 1)\n bbox_pred = torch.gather(bbox_pred, 1, inds)\n assert bbox_pred.size()[1] == reg_dim\n\n max_shape = img_meta['img_shape']\n regressed_bboxes = self.bbox_coder.decode(\n priors, bbox_pred, max_shape=max_shape)\n return regressed_bboxes" }, { "identifier": "SamplingResult", "path": "mmdet/models/task_modules/samplers/sampling_result.py", "snippet": "class SamplingResult(util_mixins.NiceRepr):\n \"\"\"Bbox sampling result.\n\n Args:\n pos_inds (Tensor): Indices of positive samples.\n neg_inds (Tensor): Indices of negative samples.\n priors (Tensor): The priors can be anchors or points,\n or the bboxes predicted by the previous stage.\n gt_bboxes (Tensor): Ground truth of bboxes.\n assign_result (:obj:`AssignResult`): Assigning results.\n gt_flags (Tensor): The Ground truth flags.\n avg_factor_with_neg (bool): If True, ``avg_factor`` equal to\n the number of total priors; Otherwise, it is the number of\n positive priors. Defaults to True.\n\n Example:\n >>> # xdoctest: +IGNORE_WANT\n >>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA\n >>> self = SamplingResult.random(rng=10)\n >>> print(f'self = {self}')\n self = <SamplingResult({\n 'neg_inds': tensor([1, 2, 3, 5, 6, 7, 8,\n 9, 10, 11, 12, 13]),\n 'neg_priors': torch.Size([12, 4]),\n 'num_gts': 1,\n 'num_neg': 12,\n 'num_pos': 1,\n 'avg_factor': 13,\n 'pos_assigned_gt_inds': tensor([0]),\n 'pos_inds': tensor([0]),\n 'pos_is_gt': tensor([1], dtype=torch.uint8),\n 'pos_priors': torch.Size([1, 4])\n })>\n \"\"\"\n\n def __init__(self,\n pos_inds: Tensor,\n neg_inds: Tensor,\n priors: Tensor,\n gt_bboxes: Tensor,\n assign_result: AssignResult,\n gt_flags: Tensor,\n avg_factor_with_neg: bool = True) -> None:\n self.pos_inds = pos_inds\n self.neg_inds = neg_inds\n self.num_pos = max(pos_inds.numel(), 1)\n self.num_neg = max(neg_inds.numel(), 1)\n self.avg_factor_with_neg = avg_factor_with_neg\n self.avg_factor = self.num_pos + self.num_neg \\\n if avg_factor_with_neg else self.num_pos\n self.pos_priors = priors[pos_inds]\n self.neg_priors = priors[neg_inds]\n self.pos_is_gt = gt_flags[pos_inds]\n\n self.num_gts = gt_bboxes.shape[0]\n self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n self.pos_gt_labels = assign_result.labels[pos_inds]\n box_dim = gt_bboxes.box_dim if isinstance(gt_bboxes, BaseBoxes) else 4\n if gt_bboxes.numel() == 0:\n # hack for index error case\n assert self.pos_assigned_gt_inds.numel() == 0\n self.pos_gt_bboxes = gt_bboxes.view(-1, box_dim)\n else:\n if len(gt_bboxes.shape) < 2:\n gt_bboxes = gt_bboxes.view(-1, box_dim)\n self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long()]\n\n @property\n def priors(self):\n \"\"\"torch.Tensor: concatenated positive and negative priors\"\"\"\n return cat_boxes([self.pos_priors, self.neg_priors])\n\n @property\n def bboxes(self):\n \"\"\"torch.Tensor: 
concatenated positive and negative boxes\"\"\"\n warnings.warn('DeprecationWarning: bboxes is deprecated, '\n 'please use \"priors\" instead')\n return self.priors\n\n @property\n def pos_bboxes(self):\n warnings.warn('DeprecationWarning: pos_bboxes is deprecated, '\n 'please use \"pos_priors\" instead')\n return self.pos_priors\n\n @property\n def neg_bboxes(self):\n warnings.warn('DeprecationWarning: neg_bboxes is deprecated, '\n 'please use \"neg_priors\" instead')\n return self.neg_priors\n\n def to(self, device):\n \"\"\"Change the device of the data inplace.\n\n Example:\n >>> self = SamplingResult.random()\n >>> print(f'self = {self.to(None)}')\n >>> # xdoctest: +REQUIRES(--gpu)\n >>> print(f'self = {self.to(0)}')\n \"\"\"\n _dict = self.__dict__\n for key, value in _dict.items():\n if isinstance(value, (torch.Tensor, BaseBoxes)):\n _dict[key] = value.to(device)\n return self\n\n def __nice__(self):\n data = self.info.copy()\n data['pos_priors'] = data.pop('pos_priors').shape\n data['neg_priors'] = data.pop('neg_priors').shape\n parts = [f\"'{k}': {v!r}\" for k, v in sorted(data.items())]\n body = ' ' + ',\\n '.join(parts)\n return '{\\n' + body + '\\n}'\n\n @property\n def info(self):\n \"\"\"Returns a dictionary of info about the object.\"\"\"\n return {\n 'pos_inds': self.pos_inds,\n 'neg_inds': self.neg_inds,\n 'pos_priors': self.pos_priors,\n 'neg_priors': self.neg_priors,\n 'pos_is_gt': self.pos_is_gt,\n 'num_gts': self.num_gts,\n 'pos_assigned_gt_inds': self.pos_assigned_gt_inds,\n 'num_pos': self.num_pos,\n 'num_neg': self.num_neg,\n 'avg_factor': self.avg_factor\n }\n\n @classmethod\n def random(cls, rng=None, **kwargs):\n \"\"\"\n Args:\n rng (None | int | numpy.random.RandomState): seed or state.\n kwargs (keyword arguments):\n - num_preds: Number of predicted boxes.\n - num_gts: Number of true boxes.\n - p_ignore (float): Probability of a predicted box assigned to\n an ignored truth.\n - p_assigned (float): probability of a predicted box not being\n assigned.\n\n Returns:\n :obj:`SamplingResult`: Randomly generated sampling result.\n\n Example:\n >>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA\n >>> self = SamplingResult.random()\n >>> print(self.__dict__)\n \"\"\"\n from mmengine.structures import InstanceData\n\n from mmdet.models.task_modules.assigners import AssignResult\n from mmdet.models.task_modules.samplers import RandomSampler\n rng = ensure_rng(rng)\n\n # make probabilistic?\n num = 32\n pos_fraction = 0.5\n neg_pos_ub = -1\n\n assign_result = AssignResult.random(rng=rng, **kwargs)\n\n # Note we could just compute an assignment\n priors = random_boxes(assign_result.num_preds, rng=rng)\n gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)\n gt_labels = torch.randint(\n 0, 5, (assign_result.num_gts, ), dtype=torch.long)\n\n pred_instances = InstanceData()\n pred_instances.priors = priors\n\n gt_instances = InstanceData()\n gt_instances.bboxes = gt_bboxes\n gt_instances.labels = gt_labels\n\n add_gt_as_proposals = True\n\n sampler = RandomSampler(\n num,\n pos_fraction,\n neg_pos_ub=neg_pos_ub,\n add_gt_as_proposals=add_gt_as_proposals,\n rng=rng)\n self = sampler.sample(\n assign_result=assign_result,\n pred_instances=pred_instances,\n gt_instances=gt_instances)\n return self" }, { "identifier": "empty_instances", "path": "mmdet/models/utils/misc.py", "snippet": "def empty_instances(batch_img_metas: List[dict],\n device: torch.device,\n task_type: str,\n instance_results: OptInstanceList = None,\n mask_thr_binary: Union[int, 
float] = 0,\n box_type: Union[str, type] = 'hbox',\n use_box_type: bool = False,\n num_classes: int = 80,\n score_per_cls: bool = False) -> List[InstanceData]:\n \"\"\"Handle predicted instances when RoI is empty.\n\n Note: If ``instance_results`` is not None, it will be modified\n in place internally, and then return ``instance_results``\n\n Args:\n batch_img_metas (list[dict]): List of image information.\n device (torch.device): Device of tensor.\n task_type (str): Expected returned task type. it currently\n supports bbox and mask.\n instance_results (list[:obj:`InstanceData`]): List of instance\n results.\n mask_thr_binary (int, float): mask binarization threshold.\n Defaults to 0.\n box_type (str or type): The empty box type. Defaults to `hbox`.\n use_box_type (bool): Whether to warp boxes with the box type.\n Defaults to False.\n num_classes (int): num_classes of bbox_head. Defaults to 80.\n score_per_cls (bool): Whether to generate classwise score for\n the empty instance. ``score_per_cls`` will be True when the model\n needs to produce raw results without nms. Defaults to False.\n\n Returns:\n list[:obj:`InstanceData`]: Detection results of each image\n \"\"\"\n assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \\\n f' but got {task_type}'\n\n if instance_results is not None:\n assert len(instance_results) == len(batch_img_metas)\n\n results_list = []\n for img_id in range(len(batch_img_metas)):\n if instance_results is not None:\n results = instance_results[img_id]\n assert isinstance(results, InstanceData)\n else:\n results = InstanceData()\n\n if task_type == 'bbox':\n _, box_type = get_box_type(box_type)\n bboxes = torch.zeros(0, box_type.box_dim, device=device)\n if use_box_type:\n bboxes = box_type(bboxes, clone=False)\n results.bboxes = bboxes\n score_shape = (0, num_classes + 1) if score_per_cls else (0, )\n results.scores = torch.zeros(score_shape, device=device)\n results.labels = torch.zeros((0, ),\n device=device,\n dtype=torch.long)\n else:\n # TODO: Handle the case where rescale is false\n img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2]\n # the type of `im_mask` will be torch.bool or torch.uint8,\n # where uint8 if for visualization and debugging.\n im_mask = torch.zeros(\n 0,\n img_h,\n img_w,\n device=device,\n dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8)\n results.masks = im_mask\n results_list.append(results)\n return results_list" }, { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "bbox_overlaps", "path": "mmdet/structures/bbox/bbox_overlaps.py", "snippet": "def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):\n \"\"\"Calculate overlap between two set of bboxes.\n\n FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889\n Note:\n Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',\n there are some new generated variable when calculating IOU\n using bbox_overlaps function:\n\n 1) is_aligned is False\n area1: M x 1\n area2: N x 1\n lt: M x N x 2\n rb: M x N x 2\n wh: M x N x 2\n overlap: M x N x 1\n union: M x N x 1\n ious: M x N x 1\n\n Total memory:\n S = (9 x N x M + N + M) * 4 Byte,\n\n When using FP16, we can reduce:\n R = (9 x N x M + N + M) * 4 / 2 Byte\n R large than (N + M) * 4 * 2 is always true when N and M >= 1.\n Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,\n N + 1 < 3 * N, when N or M is 1.\n\n Given M = 40 (ground truth), N = 
400000 (three anchor boxes\n in per grid, FPN, R-CNNs),\n R = 275 MB (one times)\n\n A special case (dense detection), M = 512 (ground truth),\n R = 3516 MB = 3.43 GB\n\n When the batch size is B, reduce:\n B x R\n\n Therefore, CUDA memory runs out frequently.\n\n Experiments on GeForce RTX 2080Ti (11019 MiB):\n\n | dtype | M | N | Use | Real | Ideal |\n |:----:|:----:|:----:|:----:|:----:|:----:|\n | FP32 | 512 | 400000 | 8020 MiB | -- | -- |\n | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |\n | FP32 | 40 | 400000 | 1540 MiB | -- | -- |\n | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |\n\n 2) is_aligned is True\n area1: N x 1\n area2: N x 1\n lt: N x 2\n rb: N x 2\n wh: N x 2\n overlap: N x 1\n union: N x 1\n ious: N x 1\n\n Total memory:\n S = 11 x N * 4 Byte\n\n When using FP16, we can reduce:\n R = 11 x N * 4 / 2 Byte\n\n So do the 'giou' (large than 'iou').\n\n Time-wise, FP16 is generally faster than FP32.\n\n When gpu_assign_thr is not -1, it takes more time on cpu\n but not reduce memory.\n There, we can reduce half the memory and keep the speed.\n\n If ``is_aligned`` is ``False``, then calculate the overlaps between each\n bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned\n pair of bboxes1 and bboxes2.\n\n Args:\n bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.\n bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.\n B indicates the batch dim, in shape (B1, B2, ..., Bn).\n If ``is_aligned`` is ``True``, then m and n must be equal.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection over\n foreground) or \"giou\" (generalized intersection over union).\n Default \"iou\".\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n eps (float, optional): A value added to the denominator for numerical\n stability. Default 1e-6.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n\n Example:\n >>> bboxes1 = torch.FloatTensor([\n >>> [0, 0, 10, 10],\n >>> [10, 10, 20, 20],\n >>> [32, 32, 38, 42],\n >>> ])\n >>> bboxes2 = torch.FloatTensor([\n >>> [0, 0, 10, 20],\n >>> [0, 10, 10, 19],\n >>> [10, 10, 20, 20],\n >>> ])\n >>> overlaps = bbox_overlaps(bboxes1, bboxes2)\n >>> assert overlaps.shape == (3, 3)\n >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)\n >>> assert overlaps.shape == (3, )\n\n Example:\n >>> empty = torch.empty(0, 4)\n >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])\n >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)\n >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)\n >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)\n \"\"\"\n\n assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'\n # Either the boxes are empty or the length of boxes' last dimension is 4\n assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)\n assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)\n\n # Batch dim must be the same\n # Batch dim: (B1, B2, ... 
Bn)\n assert bboxes1.shape[:-2] == bboxes2.shape[:-2]\n batch_shape = bboxes1.shape[:-2]\n\n rows = bboxes1.size(-2)\n cols = bboxes2.size(-2)\n if is_aligned:\n assert rows == cols\n\n if rows * cols == 0:\n if is_aligned:\n return bboxes1.new(batch_shape + (rows, ))\n else:\n return bboxes1.new(batch_shape + (rows, cols))\n\n area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (\n bboxes1[..., 3] - bboxes1[..., 1])\n area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (\n bboxes2[..., 3] - bboxes2[..., 1])\n\n if is_aligned:\n lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]\n rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]\n\n wh = fp16_clamp(rb - lt, min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in ['iou', 'giou']:\n union = area1 + area2 - overlap\n else:\n union = area1\n if mode == 'giou':\n enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])\n enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])\n else:\n lt = torch.max(bboxes1[..., :, None, :2],\n bboxes2[..., None, :, :2]) # [B, rows, cols, 2]\n rb = torch.min(bboxes1[..., :, None, 2:],\n bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]\n\n wh = fp16_clamp(rb - lt, min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in ['iou', 'giou']:\n union = area1[..., None] + area2[..., None, :] - overlap\n else:\n union = area1[..., None]\n if mode == 'giou':\n enclosed_lt = torch.min(bboxes1[..., :, None, :2],\n bboxes2[..., None, :, :2])\n enclosed_rb = torch.max(bboxes1[..., :, None, 2:],\n bboxes2[..., None, :, 2:])\n\n eps = union.new_tensor([eps])\n union = torch.max(union, eps)\n ious = overlap / union\n if mode in ['iou', 'iof']:\n return ious\n # calculate gious\n enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)\n enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]\n enclose_area = torch.max(enclose_area, eps)\n gious = ious - (enclose_area - union) / enclose_area\n return gious" } ]
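As an illustrative aside, the FP16 figures quoted in the bbox_overlaps docstring above follow directly from the formula it states, R = (9 x N x M + N + M) * 4 / 2 bytes, for the non-aligned case. The short sketch below reproduces the 275 MiB and 3516 MiB entries of its table; the helper name fp16_saving_mib is used only here for illustration.

def fp16_saving_mib(m: int, n: int) -> float:
    """Memory saved (in MiB) by halving the intermediate tensors, per the docstring's formula."""
    saved_bytes = (9 * n * m + n + m) * 4 / 2
    return saved_bytes / 2**20


if __name__ == "__main__":
    print(f"M=40,  N=400000: {fp16_saving_mib(40, 400_000):.0f} MiB")   # ~275 MiB, as in the table
    print(f"M=512, N=400000: {fp16_saving_mib(512, 400_000):.0f} MiB")  # ~3516 MiB (~3.43 GiB)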
from typing import List, Optional, Tuple, Union from mmcv.cnn import ConvModule from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor, nn from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead from mmdet.models.task_modules.samplers import SamplingResult from mmdet.models.utils import empty_instances from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_overlaps import numpy as np import torch import torch.nn.functional as F
15,729
bbox_pred: Tensor, img_meta: dict, rescale: bool = False, rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor): Box energies / deltas. has shape (num_boxes, num_classes * 4). img_meta (dict): image information. rescale (bool): If True, return boxes in original image space. Defaults to False. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None Returns: :obj:`InstanceData`: Detection results of each image. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cls_score = cls_score.reshape(-1, self.num_classes + 1) bbox_pred = bbox_pred.reshape(-1, 4) roi = roi.repeat_interleave(self.num_instance, dim=0) results = InstanceData() if roi.shape[0] == 0: return empty_instances([img_meta], roi.device, task_type='bbox', instance_results=[results])[0] scores = cls_score.softmax(dim=-1) if cls_score is not None else None img_shape = img_meta['img_shape'] bboxes = self.bbox_coder.decode( roi[..., 1:], bbox_pred, max_shape=img_shape) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: # This means that it is aug test. # It needs to return the raw results without nms. results.bboxes = bboxes results.scores = scores else: roi_idx = np.tile( np.arange(bboxes.shape[0] / self.num_instance)[:, None], (1, self.num_instance)).reshape(-1, 1)[:, 0] roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( -1, 1) bboxes = torch.cat([bboxes, roi_idx], dim=1) det_bboxes, det_scores = self.set_nms( bboxes, scores[:, 1], rcnn_test_cfg.score_thr, rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) results.bboxes = det_bboxes[:, :-1] results.scores = det_scores results.labels = torch.zeros_like(det_scores) return results @staticmethod def set_nms(bboxes: Tensor, scores: Tensor, score_thr: float, iou_threshold: float, max_num: int = -1) -> Tuple[Tensor, Tensor]: """NMS for multi-instance prediction. Please refer to https://github.com/Purkialo/CrowdDet for more details. Args: bboxes (Tensor): predict bboxes. scores (Tensor): The score of each predict bbox. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_threshold (float): IoU threshold to be considered as conflicted. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. Returns: Tuple[Tensor, Tensor]: (bboxes, scores). 
""" bboxes = bboxes[scores > score_thr] scores = scores[scores > score_thr] ordered_scores, order = scores.sort(descending=True) ordered_bboxes = bboxes[order] roi_idx = ordered_bboxes[:, -1] keep = torch.ones(len(ordered_bboxes)) == 1 ruler = torch.arange(len(ordered_bboxes)) keep = keep.to(bboxes.device) ruler = ruler.to(bboxes.device) while ruler.shape[0] > 0: basement = ruler[0] ruler = ruler[1:] idx = roi_idx[basement] # calculate the body overlap basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
# Copyright (c) OpenMMLab. All rights reserved. @MODELS.register_module() class MultiInstanceBBoxHead(BBoxHead): r"""Bbox head used in CrowdDet. .. code-block:: none /-> cls convs_1 -> cls fcs_1 -> cls_1 |-- | \-> reg convs_1 -> reg fcs_1 -> reg_1 | | /-> cls convs_2 -> cls fcs_2 -> cls_2 shared convs -> shared fcs |-- | \-> reg convs_2 -> reg fcs_2 -> reg_2 | | ... | | /-> cls convs_k -> cls fcs_k -> cls_k |-- \-> reg convs_k -> reg fcs_k -> reg_k Args: num_instance (int): The number of branches after shared fcs. Defaults to 2. with_refine (bool): Whether to use refine module. Defaults to False. num_shared_convs (int): The number of shared convs. Defaults to 0. num_shared_fcs (int): The number of shared fcs. Defaults to 2. num_cls_convs (int): The number of cls convs. Defaults to 0. num_cls_fcs (int): The number of cls fcs. Defaults to 0. num_reg_convs (int): The number of reg convs. Defaults to 0. num_reg_fcs (int): The number of reg fcs. Defaults to 0. conv_out_channels (int): The number of conv out channels. Defaults to 256. fc_out_channels (int): The number of fc out channels. Defaults to 1024. init_cfg (dict or list[dict], optional): Initialization config dict. Defaults to None. """ # noqa: W605 def __init__(self, num_instance: int = 2, with_refine: bool = False, num_shared_convs: int = 0, num_shared_fcs: int = 2, num_cls_convs: int = 0, num_cls_fcs: int = 0, num_reg_convs: int = 0, num_reg_fcs: int = 0, conv_out_channels: int = 256, fc_out_channels: int = 1024, init_cfg: Optional[Union[dict, ConfigDict]] = None, *args, **kwargs) -> None: super().__init__(*args, init_cfg=init_cfg, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) assert num_instance == 2, 'Currently only 2 instances are supported' if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_instance = num_instance self.num_shared_convs = num_shared_convs self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.with_refine = with_refine # add shared convs and fcs self.shared_convs, self.shared_fcs, last_layer_dim = \ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim self.relu = nn.ReLU(inplace=True) if self.with_refine: refine_model_cfg = { 'type': 'Linear', 'in_features': self.shared_out_channels + 20, 'out_features': self.shared_out_channels } self.shared_fcs_ref = MODELS.build(refine_model_cfg) self.fc_cls_ref = nn.ModuleList() self.fc_reg_ref = nn.ModuleList() self.cls_convs = nn.ModuleList() self.cls_fcs = nn.ModuleList() self.reg_convs = nn.ModuleList() self.reg_fcs = nn.ModuleList() self.cls_last_dim = list() self.reg_last_dim = list() self.fc_cls = nn.ModuleList() self.fc_reg = nn.ModuleList() for k in range(self.num_instance): # add cls specific branch cls_convs, cls_fcs, cls_last_dim = self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) self.cls_convs.append(cls_convs) self.cls_fcs.append(cls_fcs) self.cls_last_dim.append(cls_last_dim) # add reg specific branch reg_convs, reg_fcs, reg_last_dim = self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, 
self.shared_out_channels) self.reg_convs.append(reg_convs) self.reg_fcs.append(reg_fcs) self.reg_last_dim.append(reg_last_dim) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area if self.with_cls: if self.custom_cls_channels: cls_channels = self.loss_cls.get_cls_channels( self.num_classes) else: cls_channels = self.num_classes + 1 cls_predictor_cfg_ = self.cls_predictor_cfg.copy() # deepcopy cls_predictor_cfg_.update( in_features=self.cls_last_dim[k], out_features=cls_channels) self.fc_cls.append(MODELS.build(cls_predictor_cfg_)) if self.with_refine: self.fc_cls_ref.append(MODELS.build(cls_predictor_cfg_)) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) reg_predictor_cfg_ = self.reg_predictor_cfg.copy() reg_predictor_cfg_.update( in_features=self.reg_last_dim[k], out_features=out_dim_reg) self.fc_reg.append(MODELS.build(reg_predictor_cfg_)) if self.with_refine: self.fc_reg_ref.append(MODELS.build(reg_predictor_cfg_)) if init_cfg is None: # when init_cfg is None, # It has been set to # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))], # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))] # after `super(ConvFCBBoxHead, self).__init__()` # we only need to append additional configuration # for `shared_fcs`, `cls_fcs` and `reg_fcs` self.init_cfg += [ dict( type='Xavier', distribution='uniform', override=[ dict(name='shared_fcs'), dict(name='cls_fcs'), dict(name='reg_fcs') ]) ] def _add_conv_fc_branch(self, num_branch_convs: int, num_branch_fcs: int, in_channels: int, is_shared: bool = False) -> tuple: """Add shared or separable branch. convs -> avg pool (optional) -> fcs """ last_layer_dim = in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def forward(self, x: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and bbox prediction. - cls_score (Tensor): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_pred (Tensor): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. - cls_score_ref (Tensor): The cls_score after refine model. - bbox_pred_ref (Tensor): The bbox_pred after refine model. 
""" # shared part if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) x_cls = x x_reg = x # separate branches cls_score = list() bbox_pred = list() for k in range(self.num_instance): for conv in self.cls_convs[k]: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs[k]: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs[k]: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs[k]: x_reg = self.relu(fc(x_reg)) cls_score.append(self.fc_cls[k](x_cls) if self.with_cls else None) bbox_pred.append(self.fc_reg[k](x_reg) if self.with_reg else None) if self.with_refine: x_ref = x cls_score_ref = list() bbox_pred_ref = list() for k in range(self.num_instance): feat_ref = cls_score[k].softmax(dim=-1) feat_ref = torch.cat((bbox_pred[k], feat_ref[:, 1][:, None]), dim=1).repeat(1, 4) feat_ref = torch.cat((x_ref, feat_ref), dim=1) feat_ref = F.relu_(self.shared_fcs_ref(feat_ref)) cls_score_ref.append(self.fc_cls_ref[k](feat_ref)) bbox_pred_ref.append(self.fc_reg_ref[k](feat_ref)) cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) cls_score_ref = torch.cat(cls_score_ref, dim=1) bbox_pred_ref = torch.cat(bbox_pred_ref, dim=1) return cls_score, bbox_pred, cls_score_ref, bbox_pred_ref cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) return cls_score, bbox_pred def get_targets(self, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigDict, concat: bool = True) -> tuple: """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_targets_single` function. Args: sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). 
""" labels = [] bbox_targets = [] bbox_weights = [] label_weights = [] for i in range(len(sampling_results)): sample_bboxes = torch.cat([ sampling_results[i].pos_gt_bboxes, sampling_results[i].neg_gt_bboxes ]) sample_priors = sampling_results[i].priors sample_priors = sample_priors.repeat(1, self.num_instance).reshape( -1, 4) sample_bboxes = sample_bboxes.reshape(-1, 4) if not self.reg_decoded_bbox: _bbox_targets = self.bbox_coder.encode(sample_priors, sample_bboxes) else: _bbox_targets = sample_priors _bbox_targets = _bbox_targets.reshape(-1, self.num_instance * 4) _bbox_weights = torch.ones(_bbox_targets.shape) _labels = torch.cat([ sampling_results[i].pos_gt_labels, sampling_results[i].neg_gt_labels ]) _labels_weights = torch.ones(_labels.shape) bbox_targets.append(_bbox_targets) bbox_weights.append(_bbox_weights) labels.append(_labels) label_weights.append(_labels_weights) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, **kwargs) -> dict: """Calculate the loss based on the network predictions and targets. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, (num_classes + 1) * k), k represents the number of prediction boxes generated by each proposal box. bbox_pred (Tensor): Regression prediction results, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. rois (Tensor): RoIs with the shape (batch_size * num_proposals_single_image, 5) where the first column indicates batch id of each RoI. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). label_weights (Tensor): Labels_weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). bbox_targets (Tensor): Regression target for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. bbox_weights (Tensor): Regression weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k). Returns: dict: A dictionary of loss. """ losses = dict() if bbox_pred.numel(): loss_0 = self.emd_loss(bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_targets, labels) loss_1 = self.emd_loss(bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_targets, labels) loss = torch.cat([loss_0, loss_1], dim=1) _, min_indices = loss.min(dim=1) loss_emd = loss[torch.arange(loss.shape[0]), min_indices] loss_emd = loss_emd.mean() else: loss_emd = bbox_pred.sum() losses['loss_rcnn_emd'] = loss_emd return losses def emd_loss(self, bbox_pred_0: Tensor, cls_score_0: Tensor, bbox_pred_1: Tensor, cls_score_1: Tensor, targets: Tensor, labels: Tensor) -> Tensor: """Calculate the emd loss. Note: This implementation is modified from https://github.com/Purkialo/ CrowdDet/blob/master/lib/det_oprs/loss_opr.py Args: bbox_pred_0 (Tensor): Part of regression prediction results, has shape (batch_size * num_proposals_single_image, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. 
cls_score_0 (Tensor): Part of classification prediction results, has shape (batch_size * num_proposals_single_image, (num_classes + 1)), where 1 represents the background. bbox_pred_1 (Tensor): The other part of regression prediction results, has shape (batch_size*num_proposals_single_image, 4). cls_score_1 (Tensor):The other part of classification prediction results, has shape (batch_size * num_proposals_single_image, (num_classes + 1)). targets (Tensor):Regression target for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y], k represents the number of prediction boxes generated by each proposal box. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). Returns: torch.Tensor: The calculated loss. """ bbox_pred = torch.cat([bbox_pred_0, bbox_pred_1], dim=1).reshape(-1, bbox_pred_0.shape[-1]) cls_score = torch.cat([cls_score_0, cls_score_1], dim=1).reshape(-1, cls_score_0.shape[-1]) targets = targets.reshape(-1, 4) labels = labels.long().flatten() # masks valid_masks = labels >= 0 fg_masks = labels > 0 # multiple class bbox_pred = bbox_pred.reshape(-1, self.num_classes, 4) fg_gt_classes = labels[fg_masks] bbox_pred = bbox_pred[fg_masks, fg_gt_classes - 1, :] # loss for regression loss_bbox = self.loss_bbox(bbox_pred, targets[fg_masks]) loss_bbox = loss_bbox.sum(dim=1) # loss for classification labels = labels * valid_masks loss_cls = self.loss_cls(cls_score, labels) loss_cls[fg_masks] = loss_cls[fg_masks] + loss_bbox loss = loss_cls.reshape(-1, 2).sum(dim=1) return loss.reshape(-1, 1) def _predict_by_feat_single( self, roi: Tensor, cls_score: Tensor, bbox_pred: Tensor, img_meta: dict, rescale: bool = False, rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor): Box energies / deltas. has shape (num_boxes, num_classes * 4). img_meta (dict): image information. rescale (bool): If True, return boxes in original image space. Defaults to False. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None Returns: :obj:`InstanceData`: Detection results of each image. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cls_score = cls_score.reshape(-1, self.num_classes + 1) bbox_pred = bbox_pred.reshape(-1, 4) roi = roi.repeat_interleave(self.num_instance, dim=0) results = InstanceData() if roi.shape[0] == 0: return empty_instances([img_meta], roi.device, task_type='bbox', instance_results=[results])[0] scores = cls_score.softmax(dim=-1) if cls_score is not None else None img_shape = img_meta['img_shape'] bboxes = self.bbox_coder.decode( roi[..., 1:], bbox_pred, max_shape=img_shape) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: # This means that it is aug test. 
# It needs to return the raw results without nms. results.bboxes = bboxes results.scores = scores else: roi_idx = np.tile( np.arange(bboxes.shape[0] / self.num_instance)[:, None], (1, self.num_instance)).reshape(-1, 1)[:, 0] roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( -1, 1) bboxes = torch.cat([bboxes, roi_idx], dim=1) det_bboxes, det_scores = self.set_nms( bboxes, scores[:, 1], rcnn_test_cfg.score_thr, rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) results.bboxes = det_bboxes[:, :-1] results.scores = det_scores results.labels = torch.zeros_like(det_scores) return results @staticmethod def set_nms(bboxes: Tensor, scores: Tensor, score_thr: float, iou_threshold: float, max_num: int = -1) -> Tuple[Tensor, Tensor]: """NMS for multi-instance prediction. Please refer to https://github.com/Purkialo/CrowdDet for more details. Args: bboxes (Tensor): predict bboxes. scores (Tensor): The score of each predict bbox. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_threshold (float): IoU threshold to be considered as conflicted. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. Returns: Tuple[Tensor, Tensor]: (bboxes, scores). """ bboxes = bboxes[scores > score_thr] scores = scores[scores > score_thr] ordered_scores, order = scores.sort(descending=True) ordered_bboxes = bboxes[order] roi_idx = ordered_bboxes[:, -1] keep = torch.ones(len(ordered_bboxes)) == 1 ruler = torch.arange(len(ordered_bboxes)) keep = keep.to(bboxes.device) ruler = ruler.to(bboxes.device) while ruler.shape[0] > 0: basement = ruler[0] ruler = ruler[1:] idx = roi_idx[basement] # calculate the body overlap basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
overlap = bbox_overlaps(basement_bbox, ruler_bbox)
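The gold next line above computes the IoU between the current keeper (basement_bbox) and every remaining candidate (ruler_bbox). Below is a self-contained toy re-implementation of the set-NMS rule that this loop builds towards: high-IoU boxes are suppressed only if they come from a different proposal (different roi_idx). The names iou_matrix and toy_set_nms are invented for this sketch, which is not claimed to match the repository's exact continuation.

import torch


def iou_matrix(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Pairwise IoU between (N, 4) and (M, 4) boxes in (x1, y1, x2, y2) format."""
    lt = torch.max(a[:, None, :2], b[None, :, :2])
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    return inter / (area_a[:, None] + area_b[None, :] - inter).clamp(min=1e-6)


def toy_set_nms(bboxes, scores, roi_idx, iou_thr=0.5):
    """Greedy NMS that never suppresses boxes sharing a roi_idx with the kept box."""
    order = scores.argsort(descending=True)
    bboxes, scores, roi_idx = bboxes[order], scores[order], roi_idx[order]
    keep = torch.ones(len(bboxes), dtype=torch.bool)
    for i in range(len(bboxes)):
        if not keep[i]:
            continue
        rest = torch.arange(i + 1, len(bboxes))
        overlap = iou_matrix(bboxes[i:i + 1], bboxes[rest]).reshape(-1)
        # Suppress only high-IoU boxes that came from a *different* proposal.
        keep[rest[(overlap > iou_thr) & (roi_idx[rest] != roi_idx[i])]] = False
    return bboxes[keep], scores[keep]


# Two overlapping boxes from the same proposal both survive, while an exact
# duplicate coming from another proposal is suppressed.
boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [0., 0., 10., 10.]])
kept, _ = toy_set_nms(boxes, torch.tensor([0.9, 0.8, 0.7]), torch.tensor([0, 0, 1]))
print(kept)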
4
2023-12-11 15:23:03+00:00
24k
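One more aside on the record above: its loss method scores both possible assignments of a proposal's two predictions to the two ground-truth slots (loss_0 and loss_1 with the arguments swapped) and keeps the cheaper one per proposal. The sketch below shows that min-over-assignments idea in isolation; pair_cost and min_assignment_loss are stand-in names for illustration and are deliberately simpler than the repository's emd_loss.

import torch


def pair_cost(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """Stand-in per-proposal cost: L1 distance summed over box coordinates, shape (N, 1)."""
    return (pred - target).abs().sum(dim=-1, keepdim=True)


def min_assignment_loss(pred_0, pred_1, tgt_0, tgt_1) -> torch.Tensor:
    # Assignment A: prediction 0 -> target 0, prediction 1 -> target 1.
    loss_a = pair_cost(pred_0, tgt_0) + pair_cost(pred_1, tgt_1)
    # Assignment B: the two roles swapped.
    loss_b = pair_cost(pred_0, tgt_1) + pair_cost(pred_1, tgt_0)
    # Keep the cheaper assignment for each proposal, then average,
    # mirroring the torch.cat(...).min(dim=1) pattern in the loss method.
    return torch.cat([loss_a, loss_b], dim=1).min(dim=1).values.mean()


# Tiny usage example: random boxes for 8 proposals, two instances each.
p0, p1, t0, t1 = (torch.rand(8, 4) for _ in range(4))
print(min_assignment_loss(p0, p1, t0, t1))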
open-mmlab/PIA
animatediff/pipelines/i2v_pipeline.py
[ { "identifier": "InflatedConv3d", "path": "animatediff/models/resnet.py", "snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x" }, { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n\n # Additional\n use_motion_module = True,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # Image to Video Conv\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n 
motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n @property\n def attn_processors(self) -> Dict[str, AttnProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttnProcessor]):\n if hasattr(module, \"set_processor\"):\n processors[f\"{name}.processor\"] = module.processor\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]):\n r\"\"\"\n Parameters:\n `processor (`dict` of `AttnProcessor` or `AttnProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n of **all** `CrossAttention` layers.\n In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainablae attention processors.:\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n print(f'Set {module}')\n module.set_processor(processor)\n else:\n print(f'Set {module}')\n module.set_processor(processor.pop(f\"{name}.processor\"))\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n mask_sample: torch.FloatTensor,\n masked_sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n image_embeds: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # image to video b c f h w\n sample = torch.cat([sample, mask_sample, masked_sample], dim=1).to(sample.device)\n\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * - 10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # prepare for ip-adapter\n if image_embeds is not None:\n image_embeds = self.encoder_hid_proj(\n image_embeds).to(encoder_hidden_states.dtype)\n encoder_hidden_states = torch.cat(\n [encoder_hidden_states, image_embeds], dim=1)\n\n # pre-process\n # b c f h w\n # 2 4 16 64 64\n sample = self.conv_in(sample)\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = 
cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n\n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n\n return model" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n return text_model_dict" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. 
If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path 
= {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n 
new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config, only_decoder=False, only_encoder=False):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n 
layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, 
additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n if only_decoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('decoder') or k.startswith('post_quant')}\n elif only_encoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('encoder') or k.startswith('quant')}\n\n return new_checkpoint" }, { "identifier": "convert_lora_model_level", "path": "animatediff/utils/convert_lora_safetensor_to_diffusers.py", "snippet": "def convert_lora_model_level(state_dict, unet, text_encoder=None, LORA_PREFIX_UNET=\"lora_unet\", LORA_PREFIX_TEXT_ENCODER=\"lora_te\", alpha=0.6):\n \"\"\"convert lora in model level instead of pipeline leval\n \"\"\"\n\n visited = []\n\n # directly update weight in diffusers model\n for key in state_dict:\n # it is suggested to print out the key, it usually will be something like below\n # \"lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight\"\n\n # as we have set the alpha beforehand, so just skip\n if \".alpha\" in key or key in visited:\n continue\n\n if \"text\" in key:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_TEXT_ENCODER + \"_\")[-1].split(\"_\")\n assert text_encoder is not None, (\n 'text_encoder must be passed since lora contains text encoder layers')\n curr_layer = text_encoder\n else:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_UNET + \"_\")[-1].split(\"_\")\n curr_layer = unet\n\n # find the target layer\n temp_name = layer_infos.pop(0)\n while len(layer_infos) > -1:\n try:\n curr_layer = curr_layer.__getattr__(temp_name)\n if len(layer_infos) > 0:\n temp_name = layer_infos.pop(0)\n elif len(layer_infos) == 0:\n break\n except Exception:\n if len(temp_name) > 0:\n temp_name += \"_\" + layer_infos.pop(0)\n else:\n temp_name = layer_infos.pop(0)\n\n pair_keys = []\n if \"lora_down\" in key:\n pair_keys.append(key.replace(\"lora_down\", \"lora_up\"))\n pair_keys.append(key)\n else:\n pair_keys.append(key)\n pair_keys.append(key.replace(\"lora_up\", \"lora_down\"))\n\n # update weight\n # NOTE: load lycon, meybe have bugs :(\n if 'conv_in' in pair_keys[0]:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n weight_up = weight_up.view(weight_up.size(0), -1)\n weight_down = weight_down.view(weight_down.size(0), -1)\n shape = [e for e in curr_layer.weight.data.shape]\n shape[1] = 4\n curr_layer.weight.data[:, :4, ...] 
+= alpha * (weight_up @ weight_down).view(*shape)\n elif 'conv' in pair_keys[0]:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n weight_up = weight_up.view(weight_up.size(0), -1)\n weight_down = weight_down.view(weight_down.size(0), -1)\n shape = [e for e in curr_layer.weight.data.shape]\n curr_layer.weight.data += alpha * (weight_up @ weight_down).view(*shape)\n elif len(state_dict[pair_keys[0]].shape) == 4:\n weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)\n weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3).to(curr_layer.weight.data.device)\n else:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).to(curr_layer.weight.data.device)\n\n # update visited list\n for item in pair_keys:\n visited.append(item)\n\n return unet, text_encoder" }, { "identifier": "prepare_mask_coef_by_statistics", "path": "animatediff/utils/util.py", "snippet": "def prepare_mask_coef_by_statistics(video_length: int, cond_frame: int, sim_range: int):\n assert video_length > 0, \\\n 'video_length should be greater than 0'\n\n assert video_length > cond_frame,\\\n 'video_length should be greater than cond_frame'\n\n range_list = RANGE_LIST\n\n assert sim_range < len(range_list),\\\n f'sim_range type{sim_range} not implemented'\n\n coef = range_list[sim_range]\n coef = coef + ([coef[-1]] * (video_length - len(coef)))\n\n order = [abs(i - cond_frame) for i in range(video_length)]\n coef = [coef[order[i]] for i in range(video_length)]\n\n return coef" } ]
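The convert_lora_model_level snippet above folds every LoRA pair straight into the base weights as W += alpha * (lora_up @ lora_down). Below is a minimal, self-contained sketch of that merge on a single linear layer; the layer size, rank, and alpha are illustrative values, not taken from this dataset.

import torch
import torch.nn as nn

# Toy projection layer standing in for a UNet / text-encoder linear (shape is illustrative).
layer = nn.Linear(320, 320, bias=False)

rank, alpha = 4, 0.6
lora_up = torch.randn(320, rank)     # plays the role of "lora_up.weight"
lora_down = torch.randn(rank, 320)   # plays the role of "lora_down.weight"

# Fold the low-rank update into the dense weight, as the snippet does for 2-D weights:
# W <- W + alpha * (up @ down)
layer.weight.data += alpha * (lora_up.to(torch.float32) @ lora_down.to(torch.float32))

Because the update is merged in place, inference needs no extra LoRA modules; the trade-off is that the adapter cannot be switched off later without reloading the original weights.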
import inspect import os.path as osp import numpy as np import torch from dataclasses import dataclass from typing import Callable, List, Optional, Union from diffusers.configuration_utils import FrozenDict from diffusers.loaders import IPAdapterMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL from diffusers.pipelines import DiffusionPipeline from diffusers.schedulers import (DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler) from diffusers.utils import (BaseOutput, deprecate, is_accelerate_available, logging) from diffusers.utils.import_utils import is_xformers_available from einops import rearrange from omegaconf import OmegaConf from packaging import version from safetensors import safe_open from tqdm import tqdm from transformers import (CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection) from animatediff.models.resnet import InflatedConv3d from animatediff.models.unet import UNet3DConditionModel from animatediff.utils.convert_from_ckpt import (convert_ldm_clip_checkpoint, convert_ldm_unet_checkpoint, convert_ldm_vae_checkpoint) from animatediff.utils.convert_lora_safetensor_to_diffusers import \ convert_lora_model_level from animatediff.utils.util import prepare_mask_coef_by_statistics from accelerate import cpu_offload
14,784
"""Encode image for ip-adapter. Copied from https://github.com/huggingface/diffusers/blob/f9487783228cd500a21555da3346db40e8f05992/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L492-L514 # noqa """ dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds @torch.no_grad() def __call__( self, image: np.ndarray, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, global_inf_num: int = 0, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, cond_frame: int = 0, mask_sim_template_idx: int = 0, ip_adapter_scale: float = 0, strength: float = 1, progress_fn=None, **kwargs, ): # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor assert strength > 0 and strength <= 1, ( f'"strength" for img2vid must in (0, 1]. But receive {strength}.') # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is None: negative_prompt = DEFAULT_N_PROMPT negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) #timesteps = self.scheduler.timesteps timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size) # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( latent_timestep, batch_size * num_videos_per_prompt, 4, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) raw_image = image.copy() image = torch.from_numpy(image)[None, ...].permute(0, 3, 1, 2) image = image / 255 # [0, 1] image = image * 2 - 1 # [-1, 1] image = image.to(device=device, dtype=self.vae.dtype) if isinstance(generator, list): image_latent = [ self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) ] image_latent = torch.cat(image_latent, dim=0) else: image_latent = self.vae.encode(image).latent_dist.sample(generator) image_latent = image_latent.to(device=device, dtype=self.unet.dtype) image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]]) image_latent_padding = image_latent.clone() * 0.18215 mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device=device, dtype=self.unet.dtype) # prepare mask
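The comment in the cropped code above notes that guidance_scale plays the role of the guidance weight w from the Imagen paper and that a value of 1 corresponds to no classifier-free guidance. The denoising loop itself is cropped away here, so the following is only a sketch of the usual combination implied by that comment; the tensor shapes are illustrative.

import torch

def apply_classifier_free_guidance(noise_pred_uncond: torch.Tensor,
                                   noise_pred_text: torch.Tensor,
                                   guidance_scale: float) -> torch.Tensor:
    # With guidance_scale == 1.0 this reduces to the text-conditioned prediction,
    # i.e. no classifier-free guidance, matching the comment above.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# Illustrative latent shape: (batch, channels, frames, height / 8, width / 8).
uncond = torch.randn(1, 4, 16, 64, 64)
text = torch.randn(1, 4, 16, 64, 64)
guided = apply_classifier_free_guidance(uncond, text, guidance_scale=7.5)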
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py logger = logging.get_logger(__name__) # pylint: disable=invalid-name DEFAULT_N_PROMPT = ('wrong white balance, dark, sketches,worst quality,' 'low quality, deformed, distorted, disfigured, bad eyes, ' 'wrong lips,weird mouth, bad teeth, mutated hands and fingers, ' 'bad anatomy,wrong anatomy, amputation, extra limb, ' 'missing limb, floating,limbs, disconnected limbs, mutation, ' 'ugly, disgusting, bad_pictures, negative_hand-neg') @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class I2VPipeline(DiffusionPipeline, IPAdapterMixin, TextualInversionLoaderMixin): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], # memory_format: torch.memory_format, feature_extractor: CLIPImageProcessor = None, image_encoder: CLIPVisionModelWithProjection = None, ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. 
If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, image_encoder=image_encoder, feature_extractor=feature_extractor, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) # self.memory_format = memory_format self.use_ip_adapter = False @classmethod def build_pipeline(cls, base_cfg, base_model: str, unet_path: str, dreambooth_path: Optional[str] = None, lora_path: Optional[str] = None, lora_alpha: float = 0, vae_path: Optional[str] = None, ip_adapter_path: Optional[str] = None, ip_adapter_scale: float = 0.0, only_load_vae_decoder: bool = False, only_load_vae_encoder: bool = False) -> 'I2VPipeline': """Method to build pipeline in a faster way~ Args: base_cfg: The config to build model base_mode: The model id to initialize StableDiffusion unet_path: Path for i2v unet dreambooth_path: path for dreambooth model lora_path: path for lora model lora_alpha: value for lora scale only_load_vae_decoder: Only load VAE decoder from dreambooth / VAE ckpt and maitain encoder as original. 
""" # build unet unet = UNet3DConditionModel.from_pretrained_2d( base_model, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container( base_cfg.unet_additional_kwargs)) old_weights = unet.conv_in.weight old_bias = unet.conv_in.bias new_conv1 = InflatedConv3d( 9, old_weights.shape[0], kernel_size=unet.conv_in.kernel_size, stride=unet.conv_in.stride, padding=unet.conv_in.padding, bias=True if old_bias is not None else False) param = torch.zeros((320,5,3,3),requires_grad=True) new_conv1.weight = torch.nn.Parameter(torch.cat((old_weights,param),dim=1)) if old_bias is not None: new_conv1.bias = old_bias unet.conv_in = new_conv1 unet.config["in_channels"] = 9 unet_ckpt = torch.load(unet_path, map_location='cpu') unet.load_state_dict(unet_ckpt, strict=False) # NOTE: only load temporal layers and condition module # for key, value in unet_ckpt.items(): # if 'motion' in key or 'conv_in' in key: # unet.state_dict()[key].copy_(value) # load vae, tokenizer, text encoder vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae") tokenizer = CLIPTokenizer.from_pretrained(base_model, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(base_model, subfolder="text_encoder") noise_scheduler = DDIMScheduler(**OmegaConf.to_container(base_cfg.noise_scheduler_kwargs)) if dreambooth_path: print(" >>> Begin loading DreamBooth >>>") base_model_state_dict = {} with safe_open(dreambooth_path, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) # load unet converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, unet.config) old_value = converted_unet_checkpoint['conv_in.weight'] new_param = unet_ckpt['conv_in.weight'][:,4:,:,:].clone().cpu() new_value = torch.nn.Parameter(torch.cat((old_value, new_param), dim=1)) converted_unet_checkpoint['conv_in.weight'] = new_value unet.load_state_dict(converted_unet_checkpoint, strict=False) # load vae converted_vae_checkpoint = convert_ldm_vae_checkpoint( base_model_state_dict, vae.config, only_decoder=only_load_vae_decoder, only_encoder=only_load_vae_encoder,) need_strict = not (only_load_vae_decoder or only_load_vae_encoder) vae.load_state_dict(converted_vae_checkpoint, strict=need_strict) print('Prefix in loaded VAE checkpoint: ') print(set([k.split('.')[0] for k in converted_vae_checkpoint.keys()])) # load text encoder text_encoder_checkpoint = convert_ldm_clip_checkpoint(base_model_state_dict) if text_encoder_checkpoint: text_encoder.load_state_dict(text_encoder_checkpoint, strict=False) print(" <<< Loaded DreamBooth <<<") if vae_path: print(' >>> Begin loading VAE >>>') vae_state_dict = {} if vae_path.endswith('safetensors'): with safe_open(vae_path, framework="pt", device="cpu") as f: for key in f.keys(): vae_state_dict[key] = f.get_tensor(key) elif vae_path.endswith('ckpt') or vae_path.endswith('pt'): vae_state_dict = torch.load(vae_path, map_location='cpu') if 'state_dict' in vae_state_dict: vae_state_dict = vae_state_dict['state_dict'] vae_state_dict = {f'first_stage_model.{k}': v for k, v in vae_state_dict.items()} converted_vae_checkpoint = convert_ldm_vae_checkpoint( vae_state_dict, vae.config, only_decoder=only_load_vae_decoder, only_encoder=only_load_vae_encoder,) print('Prefix in loaded VAE checkpoint: ') print(set([k.split('.')[0] for k in converted_vae_checkpoint.keys()])) need_strict = not (only_load_vae_decoder or only_load_vae_encoder) vae.load_state_dict(converted_vae_checkpoint, strict=need_strict) print(" <<< Loaded VAE <<<") if lora_path: print(" >>> 
Begin loading LoRA >>>") lora_dict = {} with safe_open(lora_path, framework='pt', device='cpu') as file: for k in file.keys(): lora_dict[k] = file.get_tensor(k) unet, text_encoder = convert_lora_model_level( lora_dict, unet, text_encoder, alpha=lora_alpha) print(" <<< Loaded LoRA <<<") # move model to device device = torch.device('cuda') unet_dtype = torch.float16 tenc_dtype = torch.float16 vae_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float32 unet = unet.to(device=device, dtype=unet_dtype) text_encoder = text_encoder.to(device=device, dtype=tenc_dtype) vae = vae.to(device=device, dtype=vae_dtype) print(f'Set Unet to {unet_dtype}') print(f'Set text encoder to {tenc_dtype}') print(f'Set vae to {vae_dtype}') if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() pipeline = cls(unet=unet, vae=vae, tokenizer=tokenizer, text_encoder=text_encoder, scheduler=noise_scheduler) # ip_adapter_path = 'h94/IP-Adapter' if ip_adapter_path and ip_adapter_scale > 0: ip_adapter_name = 'ip-adapter_sd15.bin' # only online repo need subfolder if not osp.isdir(ip_adapter_path): subfolder = 'models' else: subfolder = '' pipeline.load_ip_adapter(ip_adapter_path, subfolder, ip_adapter_name) pipeline.set_ip_adapter_scale(ip_adapter_scale) pipeline.use_ip_adapter = True print(f'Load IP-Adapter, scale: {ip_adapter_scale}') # text_inversion_path = './models/TextualInversion/easynegative.safetensors' # if text_inversion_path: # pipeline.load_textual_inversion(text_inversion_path, 'easynegative') return pipeline def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] 
# duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0])): video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def prepare_latents(self, add_noise_time_step, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): shape = shape # shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) return latents def encode_image(self, image, device, num_images_per_prompt): """Encode image for ip-adapter. 
Copied from https://github.com/huggingface/diffusers/blob/f9487783228cd500a21555da3346db40e8f05992/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L492-L514 # noqa """ dtype = next(self.image_encoder.parameters()).dtype if not isinstance(image, torch.Tensor): image = self.feature_extractor(image, return_tensors="pt").pixel_values image = image.to(device=device, dtype=dtype) image_embeds = self.image_encoder(image).image_embeds image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0) uncond_image_embeds = torch.zeros_like(image_embeds) return image_embeds, uncond_image_embeds @torch.no_grad() def __call__( self, image: np.ndarray, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, global_inf_num: int = 0, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, cond_frame: int = 0, mask_sim_template_idx: int = 0, ip_adapter_scale: float = 0, strength: float = 1, progress_fn=None, **kwargs, ): # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor assert strength > 0 and strength <= 1, ( f'"strength" for img2vid must in (0, 1]. But receive {strength}.') # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is None: negative_prompt = DEFAULT_N_PROMPT negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) #timesteps = self.scheduler.timesteps timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) latent_timestep = timesteps[:1].repeat(batch_size) # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( latent_timestep, batch_size * num_videos_per_prompt, 4, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) raw_image = image.copy() image = torch.from_numpy(image)[None, ...].permute(0, 3, 1, 2) image = image / 255 # [0, 1] image = image * 2 - 1 # [-1, 1] image = image.to(device=device, dtype=self.vae.dtype) if isinstance(generator, list): image_latent = [ self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) ] image_latent = torch.cat(image_latent, dim=0) else: image_latent = self.vae.encode(image).latent_dist.sample(generator) image_latent = image_latent.to(device=device, dtype=self.unet.dtype) image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]]) image_latent_padding = image_latent.clone() * 0.18215 mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device=device, dtype=self.unet.dtype) # prepare mask
mask_coef = prepare_mask_coef_by_statistics(video_length, cond_frame, mask_sim_template_idx)
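The gold next line calls prepare_mask_coef_by_statistics, whose source appears in this sample's context list above. A minimal sketch of what that call computes follows; the real RANGE_LIST table is not part of this dump, so the values below are hypothetical stand-ins.

# RANGE_LIST is not included in this dump; the table below is a hypothetical
# stand-in used only to illustrate how the function weights frames by their
# distance to the conditioning frame.
RANGE_LIST = [[1.0, 0.9, 0.8, 0.7]]

def prepare_mask_coef_by_statistics(video_length: int, cond_frame: int, sim_range: int):
    coef = list(RANGE_LIST[sim_range])
    # pad with the last value so every possible distance has a coefficient
    coef = coef + [coef[-1]] * (video_length - len(coef))
    # each frame is weighted by its distance to the conditioning frame
    order = [abs(i - cond_frame) for i in range(video_length)]
    return [coef[order[i]] for i in range(video_length)]

print(prepare_mask_coef_by_statistics(video_length=8, cond_frame=3, sim_range=0))
# -> [0.7, 0.8, 0.9, 1.0, 0.9, 0.8, 0.7, 0.7]

With a descending table like the hypothetical one above, the conditioning frame receives the strongest coefficient and frames further away decay toward the tail value.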
6
2023-12-21 03:29:34+00:00
24k
chinhsuanwu/ifusion
model/zero123.py
[ { "identifier": "inject_trainable_lora_extended", "path": "ldm/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n eval=True,\n):\n \"\"\"\n inject lora into model, and returns lora parameter groups.\n \"\"\"\n\n require_grad_params = []\n names = []\n\n if loras != None:\n loras = torch.load(loras, map_location=model.device)\n\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, nn.Conv2d]\n ):\n if _child_module.__class__ == nn.Linear:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedLinear(\n _child_module.in_features,\n _child_module.out_features,\n _child_module.bias is not None,\n r=r,\n )\n _tmp.linear.weight = weight\n if bias is not None:\n _tmp.linear.bias = bias\n elif _child_module.__class__ == nn.Conv2d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv2d(\n _child_module.in_channels,\n _child_module.out_channels,\n _child_module.kernel_size,\n _child_module.stride,\n _child_module.padding,\n _child_module.dilation,\n _child_module.groups,\n _child_module.bias is not None,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n\n # switch the module\n _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)\n if bias is not None:\n _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)\n\n _module._modules[name] = _tmp\n\n require_grad_params.append(_module._modules[name].lora_up.parameters())\n require_grad_params.append(_module._modules[name].lora_down.parameters())\n\n if loras != None:\n _module._modules[name].lora_up.weight = nn.Parameter(loras.pop(0).to(model.dtype))\n _module._modules[name].lora_down.weight = nn.Parameter(loras.pop(0).to(model.dtype))\n\n _module._modules[name].lora_up.weight.requires_grad = True if not eval else False\n _module._modules[name].lora_down.weight.requires_grad = True if not eval else False\n names.append(name)\n\n return require_grad_params, names" }, { "identifier": "monkeypatch_remove_lora", "path": "ldm/lora.py", "snippet": "def monkeypatch_remove_lora(model):\n for _module, name, _child_module in _find_modules(\n model, search_class=[LoraInjectedLinear, LoraInjectedConv2d]\n ):\n if isinstance(_child_module, LoraInjectedLinear):\n _source = _child_module.linear\n weight, bias = _source.weight, _source.bias\n\n _tmp = nn.Linear(\n _source.in_features, _source.out_features, bias is not None\n )\n\n _tmp.weight = weight\n if bias is not None:\n _tmp.bias = bias\n\n else:\n _source = _child_module.conv\n weight, bias = _source.weight, _source.bias\n\n _tmp = nn.Conv2d(\n in_channels=_source.in_channels,\n out_channels=_source.out_channels,\n kernel_size=_source.kernel_size,\n stride=_source.stride,\n padding=_source.padding,\n dilation=_source.dilation,\n groups=_source.groups,\n bias=bias is not None,\n )\n\n _tmp.weight = weight\n if bias is not None:\n _tmp.bias = bias\n\n _module._modules[name] = _tmp" }, { "identifier": "save_lora_weight", "path": "ldm/lora.py", "snippet": "def save_lora_weight(\n model,\n path=\"./lora.pt\",\n target_replace_module=DEFAULT_TARGET_REPLACE,\n):\n weights = []\n for _up, _down in extract_lora_ups_down(\n model, target_replace_module=target_replace_module\n ):\n weights.append(_up.weight.to(\"cpu\").to(torch.float16))\n weights.append(_down.weight.to(\"cpu\").to(torch.float16))\n\n 
torch.save(weights, path)" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(\n self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image_cond\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n unet_trainable=True,\n *args,\n **kwargs,\n ):\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs[\"timesteps\"]\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = \"concat\" if concat_mode else \"crossattn\"\n if cond_stage_config == \"__is_unconditional__\":\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.unet_trainable = unet_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer(\"scale_factor\", torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n\n # construct linear projection layer for concatenating image CLIP embedding and RT\n self.cc_projection = nn.Linear(772, 768)\n nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])\n nn.init.zeros_(list(self.cc_projection.parameters())[1])\n self.cc_projection.requires_grad_(True)\n\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n\n def make_cond_schedule(\n self,\n ):\n self.cond_ids = torch.full(\n size=(self.num_timesteps,),\n fill_value=self.num_timesteps - 1,\n dtype=torch.long,\n )\n ids = torch.round(\n torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)\n ).long()\n self.cond_ids[: self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if (\n self.scale_by_std\n and self.current_epoch == 0\n and self.global_step == 0\n and batch_idx == 0\n and not self.restarted_from_ckpt\n ):\n assert (\n self.scale_factor == 1.0\n ), \"rather not use custom rescaling and std-rescaling simultaneously\"\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer(\"scale_factor\", 1.0 / z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(\n self,\n given_betas=None,\n beta_schedule=\"linear\",\n timesteps=1000,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3,\n ):\n super().register_schedule(\n given_betas, beta_schedule, timesteps, linear_start, linear_end, 
cosine_s\n )\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != \"__is_first_stage__\"\n assert config != \"__is_unconditional__\"\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(\n self, samples, desc=\"\", force_no_decoder_quantization=False\n ):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(\n self.decode_first_stage(\n zd.to(self.device), force_not_quantize=force_no_decoder_quantization\n )\n )\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, \"n b c h w -> b n c h w\")\n denoise_grid = rearrange(denoise_grid, \"b n c h w -> (b n) c h w\")\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(\n f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\"\n )\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, \"encode\") and callable(\n self.cond_stage_model.encode\n ):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(\n torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1\n )[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(\n weighting,\n 
self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"],\n )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(\n L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"],\n )\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(\n self, x, kernel_size, stride, uf=1, df=1\n ): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(\n kernel_size[0], kernel_size[1], Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(\n kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1,\n padding=0,\n stride=(stride[0] * uf, stride[1] * uf),\n )\n fold = torch.nn.Fold(\n output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2\n )\n\n weighting = self.get_weighting(\n kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(\n 1, 1, h * uf, w * uf\n ) # normalizes the overlap\n weighting = weighting.view(\n (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)\n )\n\n elif df > 1 and uf == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(\n kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1,\n padding=0,\n stride=(stride[0] // df, stride[1] // df),\n )\n fold = torch.nn.Fold(\n output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2\n )\n\n weighting = self.get_weighting(\n kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(\n 1, 1, h // df, w // df\n ) # normalizes the overlap\n weighting = weighting.view(\n (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)\n )\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(\n self,\n batch,\n k,\n return_first_stage_outputs=False,\n force_c_encode=False,\n cond_key=None,\n return_original_cond=False,\n bs=None,\n uncond=0.05,\n ):\n x = super().get_input(batch, k)\n T = batch[\"T\"].to(memory_format=torch.contiguous_format).float()\n\n if bs is not None:\n x = x[:bs]\n T = T[:bs].to(self.device)\n\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n cond_key = cond_key or self.cond_stage_key\n xc = super().get_input(batch, cond_key).to(self.device)\n if bs is not 
None:\n xc = xc[:bs]\n cond = {}\n\n # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%.\n random = torch.rand(x.size(0), device=x.device)\n prompt_mask = rearrange(random < 2 * uncond, \"n -> n 1 1\")\n input_mask = 1 - rearrange(\n (random >= uncond).float() * (random < 3 * uncond).float(), \"n -> n 1 1 1\"\n )\n null_prompt = self.get_learned_conditioning([\"\"])\n\n # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768]\n # print('=========== xc shape ===========', xc.shape)\n with torch.enable_grad():\n clip_emb = self.get_learned_conditioning(xc).detach()\n null_prompt = self.get_learned_conditioning([\"\"]).detach()\n cond[\"c_crossattn\"] = [\n self.cc_projection(\n torch.cat(\n [\n torch.where(prompt_mask, null_prompt, clip_emb),\n T[:, None, :],\n ],\n dim=-1,\n )\n )\n ]\n cond[\"c_concat\"] = [\n input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach()\n ]\n out = [z, cond]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_original_cond:\n out.append(xc)\n return out\n\n # @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, \"b h w c -> b c h w\").contiguous()\n\n z = 1.0 / self.scale_factor * z\n\n if hasattr(self, \"split_input_params\"):\n if self.split_input_params[\"patch_distributed_vq\"]:\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n uf = self.split_input_params[\"vqf\"]\n bs, nc, h, w = z.shape\n if ks[0] > h or ks[1] > w:\n ks = (min(ks[0], h), min(ks[1], w))\n print(\"reducing Kernel\")\n\n if stride[0] > h or stride[1] > w:\n stride = (min(stride[0], h), min(stride[1], w))\n print(\"reducing stride\")\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n z, ks, stride, uf=uf\n )\n\n z = unfold(z) # (bn, nc * prod(**ks), L)\n # 1. Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n # 2. apply model loop over last dim\n if isinstance(self.first_stage_model, VQModelInterface):\n output_list = [\n self.first_stage_model.decode(\n z[:, :, :, :, i],\n force_not_quantize=predict_cids or force_not_quantize,\n )\n for i in range(z.shape[-1])\n ]\n else:\n output_list = [\n self.first_stage_model.decode(z[:, :, :, :, i])\n for i in range(z.shape[-1])\n ]\n\n o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)\n o = o * weighting\n # Reverse 1. 
reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n decoded = fold(o)\n decoded = decoded / normalization # norm is shape (1, 1, h, w)\n return decoded\n else:\n if isinstance(self.first_stage_model, VQModelInterface):\n return self.first_stage_model.decode(\n z, force_not_quantize=predict_cids or force_not_quantize\n )\n else:\n return self.first_stage_model.decode(z)\n\n else:\n if isinstance(self.first_stage_model, VQModelInterface):\n return self.first_stage_model.decode(\n z, force_not_quantize=predict_cids or force_not_quantize\n )\n else:\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n if hasattr(self, \"split_input_params\"):\n if self.split_input_params[\"patch_distributed_vq\"]:\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n df = self.split_input_params[\"vqf\"]\n self.split_input_params[\"original_image_size\"] = x.shape[-2:]\n bs, nc, h, w = x.shape\n if ks[0] > h or ks[1] > w:\n ks = (min(ks[0], h), min(ks[1], w))\n print(\"reducing Kernel\")\n\n if stride[0] > h or stride[1] > w:\n stride = (min(stride[0], h), min(stride[1], w))\n print(\"reducing stride\")\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n x, ks, stride, df=df\n )\n z = unfold(x) # (bn, nc * prod(**ks), L)\n # Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n output_list = [\n self.first_stage_model.encode(z[:, :, :, :, i])\n for i in range(z.shape[-1])\n ]\n\n o = torch.stack(output_list, axis=-1)\n o = o * weighting\n\n # Reverse reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n decoded = fold(o)\n decoded = decoded / normalization\n return decoded\n\n else:\n return self.first_stage_model.encode(x)\n else:\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, step_ratio=None, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c, step_ratio=step_ratio)\n return loss\n\n def forward(self, x, c, step_ratio=None, *args, **kwargs):\n if step_ratio is not None:\n t = np.round((1 - step_ratio) * self.num_timesteps).clip(0, self.num_timesteps - 1)\n t = torch.full((x.shape[0],), t, dtype=torch.long, device=self.device)\n else:\n t = torch.randint(\n 0, self.num_timesteps, (x.shape[0],), device=self.device\n ).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n # if self.cond_stage_trainable:\n # c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset\n def rescale_bbox(bbox):\n x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])\n y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])\n w = min(bbox[2] / crop_coordinates[2], 1 - x0)\n h = min(bbox[3] / crop_coordinates[3], 1 - y0)\n return x0, y0, w, h\n\n return [rescale_bbox(b) for b in bboxes]\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is exptected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = (\n \"c_concat\" if 
self.model.conditioning_key == \"concat\" else \"c_crossattn\"\n )\n cond = {key: cond}\n\n if hasattr(self, \"split_input_params\"):\n assert len(cond) == 1 # todo can only deal with one conditioning atm\n assert not return_ids\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n\n h, w = x_noisy.shape[-2:]\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n x_noisy, ks, stride\n )\n\n z = unfold(x_noisy) # (bn, nc * prod(**ks), L)\n # Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]\n\n if (\n self.cond_stage_key in [\"image\", \"LR_image\", \"segmentation\", \"bbox_img\"]\n and self.model.conditioning_key\n ): # todo check for completeness\n c_key = next(iter(cond.keys())) # get key\n c = next(iter(cond.values())) # get value\n assert len(c) == 1 # todo extend to list with more than one elem\n c = c[0] # get element\n\n c = unfold(c)\n c = c.view(\n (c.shape[0], -1, ks[0], ks[1], c.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]\n\n elif self.cond_stage_key == \"coordinates_bbox\":\n assert (\n \"original_image_size\" in self.split_input_params\n ), \"BoudingBoxRescaling is missing original_image_size\"\n\n # assuming padding of unfold is always 0 and its dilation is always 1\n n_patches_per_row = int((w - ks[0]) / stride[0] + 1)\n full_img_h, full_img_w = self.split_input_params[\"original_image_size\"]\n # as we are operating on latents, we need the factor from the original image size to the\n # spatial latent size to properly rescale the crops for regenerating the bbox annotations\n num_downs = self.first_stage_model.encoder.num_resolutions - 1\n rescale_latent = 2 ** (num_downs)\n\n # get top left postions of patches as conforming for the bbbox tokenizer, therefore we\n # need to rescale the tl patch coordinates to be in between (0,1)\n tl_patch_coordinates = [\n (\n rescale_latent\n * stride[0]\n * (patch_nr % n_patches_per_row)\n / full_img_w,\n rescale_latent\n * stride[1]\n * (patch_nr // n_patches_per_row)\n / full_img_h,\n )\n for patch_nr in range(z.shape[-1])\n ]\n\n # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)\n patch_limits = [\n (\n x_tl,\n y_tl,\n rescale_latent * ks[0] / full_img_w,\n rescale_latent * ks[1] / full_img_h,\n )\n for x_tl, y_tl in tl_patch_coordinates\n ]\n # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]\n\n # tokenize crop coordinates for the bounding boxes of the respective patches\n patch_limits_tknzd = [\n torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(\n self.device\n )\n for bbox in patch_limits\n ] # list of length l with tensors of shape (1, 2)\n # cut tknzd crop position from conditioning\n assert isinstance(cond, dict), \"cond must be dict to be fed into model\"\n cut_cond = cond[\"c_crossattn\"][0][..., :-2].to(self.device)\n\n adapted_cond = torch.stack(\n [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]\n )\n adapted_cond = rearrange(adapted_cond, \"l b n -> (l b) n\")\n adapted_cond = self.get_learned_conditioning(adapted_cond)\n adapted_cond = rearrange(\n adapted_cond, \"(l b) n d -> l b n d\", l=z.shape[-1]\n )\n\n cond_list = [{\"c_crossattn\": [e]} for e in adapted_cond]\n\n else:\n cond_list = [\n cond for i in 
range(z.shape[-1])\n ] # Todo make this more efficient\n\n # apply model by loop over crops\n output_list = [\n self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])\n ]\n assert not isinstance(\n output_list[0], tuple\n ) # todo cant deal with multiple model outputs check this never happens\n\n o = torch.stack(output_list, axis=-1)\n o = o * weighting\n # Reverse reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n x_recon = fold(o) / normalization\n\n else:\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (\n extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - pred_xstart\n ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(\n mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0\n )\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = \"train\" if self.training else \"val\"\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f\"{prefix}/loss_simple\": loss_simple.mean()})\n\n if self.logvar.device != self.device:\n self.logvar = self.logvar.to(self.device)\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f\"{prefix}/loss_gamma\": loss.mean()})\n loss_dict.update({\"logvar\": self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f\"{prefix}/loss_vlb\": loss_vlb})\n loss += self.original_elbo_weight * loss_vlb\n loss_dict.update({f\"{prefix}/loss\": loss})\n\n return loss, loss_dict\n\n def p_mean_variance(\n self,\n x,\n c,\n t,\n clip_denoised: bool,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(\n self, model_out, x, t, c, **corrector_kwargs\n )\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif 
self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1.0, 1.0)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x, t=t\n )\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(\n self,\n x,\n c,\n t,\n clip_denoised=False,\n repeat_noise=False,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(\n x=x,\n c=c,\n t=t,\n clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (\n 0.5 * model_log_variance\n ).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return (\n model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise,\n x0,\n )\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(\n self,\n cond,\n shape,\n verbose=True,\n callback=None,\n quantize_denoised=False,\n img_callback=None,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n batch_size=None,\n x_T=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(\n reversed(range(0, timesteps)),\n desc=\"Progressive Generation\",\n total=timesteps,\n )\n if verbose\n else reversed(range(0, timesteps))\n )\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = 
self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n return_x0=True,\n temperature=temperature[i],\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(\n self,\n cond,\n shape,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(reversed(range(0, timesteps)), desc=\"Sampling t\", total=timesteps)\n if verbose\n else reversed(range(0, timesteps))\n )\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n )\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(\n self,\n cond,\n batch_size=16,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n shape=None,\n **kwargs,\n ):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n return self.p_sample_loop(\n cond,\n shape,\n return_intermediates=return_intermediates,\n x_T=x_T,\n verbose=verbose,\n timesteps=timesteps,\n quantize_denoised=quantize_denoised,\n mask=mask,\n x0=x0,\n )\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(\n ddim_steps, batch_size, shape, cond, verbose=False, **kwargs\n )\n\n else:\n samples, intermediates = self.sample(\n cond=cond, batch_size=batch_size, 
return_intermediates=True, **kwargs\n )\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(\n self, batch_size, null_label=None, image_size=512\n ):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n # todo: get null label from cond_stage_model\n raise NotImplementedError()\n c = repeat(c, \"1 ... -> b ...\", b=batch_size).to(self.device)\n cond = {}\n cond[\"c_crossattn\"] = [c]\n cond[\"c_concat\"] = [\n torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to(\n self.device\n )\n ]\n return cond\n\n @torch.no_grad()\n def log_images(\n self,\n batch,\n N=8,\n n_row=4,\n sample=True,\n ddim_steps=200,\n ddim_eta=1.0,\n return_keys=None,\n quantize_denoised=True,\n inpaint=True,\n plot_denoise_rows=False,\n plot_progressive_rows=True,\n plot_diffusion_rows=True,\n unconditional_guidance_scale=1.0,\n unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs,\n ):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(\n batch,\n self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N,\n )\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img(\n (x.shape[2], x.shape[3]),\n batch[self.cond_stage_key],\n size=x.shape[2] // 25,\n )\n log[\"conditioning\"] = xc\n elif self.cond_stage_key == \"class_label\":\n xc = log_txt_as_img(\n (x.shape[2], x.shape[3]),\n batch[\"human_label\"],\n size=x.shape[2] // 25,\n )\n log[\"conditioning\"] = xc\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), \"1 -> b\", b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, \"n b c h w -> b n c h w\")\n diffusion_grid = rearrange(diffusion_grid, \"b n c h w -> (b n) c h w\")\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n )\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if (\n quantize_denoised\n and not 
isinstance(self.first_stage_model, AutoencoderKL)\n and not isinstance(self.first_stage_model, IdentityFirstStage)\n ):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n quantize_denoised=True,\n )\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(\n N, unconditional_guidance_label, image_size=x.shape[-1]\n )\n # uc = torch.zeros_like(c)\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[\n f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"\n ] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n eta=ddim_eta,\n ddim_steps=ddim_steps,\n x0=z[:N],\n mask=mask,\n )\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1.0 - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n eta=ddim_eta,\n ddim_steps=ddim_steps,\n x0=z[:N],\n mask=mask,\n )\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(\n c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N,\n )\n prog_row = self._get_denoise_row_from_list(\n progressives, desc=\"Progressive Generation\"\n )\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = []\n if self.unet_trainable == \"attn\":\n print(\"Training only unet attention layers\")\n for n, m in self.model.named_modules():\n if isinstance(m, CrossAttention) and n.endswith(\"attn2\"):\n params.extend(m.parameters())\n if self.unet_trainable == \"conv_in\":\n print(\"Training only unet input conv layers\")\n params = list(self.model.diffusion_model.input_blocks[0][0].parameters())\n elif self.unet_trainable is True or self.unet_trainable == \"all\":\n print(\"Training the full unet\")\n params = list(self.model.parameters())\n else:\n raise ValueError(\n f\"Unrecognised setting for unet_trainable: {self.unet_trainable}\"\n )\n\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print(\"Diffusion model 
optimizing logvar\")\n params.append(self.logvar)\n\n if self.cc_projection is not None:\n params = params + list(self.cc_projection.parameters())\n print(\"========== optimizing for cc projection weight ==========\")\n\n opt = torch.optim.AdamW(\n [\n {\"params\": self.model.parameters(), \"lr\": lr},\n {\"params\": self.cc_projection.parameters(), \"lr\": 10.0 * lr},\n ],\n lr=lr,\n )\n if self.use_scheduler:\n assert \"target\" in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n \"scheduler\": LambdaLR(opt, lr_lambda=scheduler.schedule),\n \"interval\": \"step\",\n \"frequency\": 1,\n }\n ]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "load_model_from_config", "path": "ldm/util.py", "snippet": "def load_model_from_config(config, ckpt, device, vram_O=False, verbose=False):\n print(f\"[INFO] Loading model from {ckpt}\")\n pl_sd = torch.load(ckpt, map_location=\"cpu\")\n\n if \"global_step\" in pl_sd and verbose:\n print(f'[INFO] Global Step: {pl_sd[\"global_step\"]}')\n\n sd = pl_sd[\"state_dict\"]\n\n model = instantiate_from_config(config.model)\n m, u = model.load_state_dict(sd, strict=False)\n\n if len(m) > 0 and verbose:\n print(\"[INFO] Missing keys: \\n\", m)\n if len(u) > 0 and verbose:\n print(\"[INFO] Unexpected keys: \\n\", u)\n\n # manually load ema and delete it to save GPU memory\n if model.use_ema:\n if verbose:\n print(\"[INFO] Loading EMA\")\n model.model_ema.copy_to(model.model)\n del model.model_ema\n\n if vram_O:\n # we don't need decoder\n del model.first_stage_model.decoder\n\n torch.cuda.empty_cache()\n model.eval().to(device)\n\n return model" }, { "identifier": "make_T", "path": "util/pose.py", "snippet": "def make_T(theta, azimuth, distance, in_deg=False):\n if in_deg:\n theta, azimuth = theta.deg2rad(), azimuth.deg2rad()\n return torch.stack(\n (\n theta,\n torch.sin(azimuth),\n torch.cos(azimuth),\n distance,\n )\n )" }, { "identifier": "default", "path": "util/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" } ]
import itertools
import torch
import torch.nn as nn
from dataclasses import dataclass
from diffusers import DDIMScheduler
from einops import rearrange
from omegaconf import OmegaConf
from ldm.lora import (
    inject_trainable_lora_extended,
    monkeypatch_remove_lora,
    save_lora_weight,
)
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import load_model_from_config
from util.pose import make_T
from util.typing import *
from util.util import default
14862
class Zero123(nn.Module):
    @dataclass
    class Config:
        pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt"
        pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml"
        vram_O: bool = False
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98

    config: Config

    def __init__(self, **kwargs) -> None:
        super().__init__()
        self.config = OmegaConf.structured(self.Config(**kwargs))
        self.device = "cuda"
        self.require_grad_params = []
        self.configure()

    def configure(self) -> None:
        print("[INFO] Loading Zero123...")
        self.pretrained_config = OmegaConf.load(self.config.pretrained_config)
        self.weights_dtype = torch.float32
class Zero123(nn.Module):
    @dataclass
    class Config:
        pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt"
        pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml"
        vram_O: bool = False
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98

    config: Config

    def __init__(self, **kwargs) -> None:
        super().__init__()
        self.config = OmegaConf.structured(self.Config(**kwargs))
        self.device = "cuda"
        self.require_grad_params = []
        self.configure()

    def configure(self) -> None:
        print("[INFO] Loading Zero123...")
        self.pretrained_config = OmegaConf.load(self.config.pretrained_config)
        self.weights_dtype = torch.float32
self.model: LatentDiffusion = load_model_from_config(
3
2023-12-17 12:45:38+00:00
24k
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/model/llava/constants.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "IGNORE_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "tokenizer_image_token", "path": "VisualSearch/model/llava/mm_utils.py", "snippet": "def tokenizer_image_token(\n prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None\n):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split(\"<image>\")]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if (\n len(prompt_chunks) > 0\n and len(prompt_chunks[0]) > 0\n and prompt_chunks[0][0] == tokenizer.bos_token_id\n ):\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == \"pt\":\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f\"Unsupported tensor type: {return_tensors}\")\n return input_ids" }, { "identifier": "get_mask_from_json", "path": "VisualSearch/utils/data_processing.py", "snippet": "def get_mask_from_json(json_path, img):\n try:\n with open(json_path, \"r\") as r:\n anno = json.loads(r.read())\n except:\n with open(json_path, \"r\", encoding=\"cp1252\") as r:\n anno = json.loads(r.read())\n\n inform = anno[\"shapes\"]\n comments = anno[\"text\"]\n is_sentence = anno[\"is_sentence\"]\n\n height, width = img.shape[:2]\n\n ### sort polies by area\n area_list = []\n valid_poly_list = []\n for i in inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n if \"flag\" == label_id.lower(): ## meaningless deprecated annotations\n continue\n\n tmp_mask = np.zeros((height, width), dtype=np.uint8)\n cv2.polylines(tmp_mask, np.array([points], dtype=np.int32), True, 1, 1)\n cv2.fillPoly(tmp_mask, np.array([points], dtype=np.int32), 1)\n tmp_area = tmp_mask.sum()\n\n area_list.append(tmp_area)\n valid_poly_list.append(i)\n\n ### ground-truth mask\n sort_index = np.argsort(area_list)[::-1].astype(np.int32)\n sort_index = list(sort_index)\n sort_inform = []\n for s_idx in sort_index:\n sort_inform.append(valid_poly_list[s_idx])\n\n mask = np.zeros((height, width), dtype=np.uint8)\n for i in sort_inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n\n if \"ignore\" in label_id.lower():\n label_value = 255 # ignored during evaluation\n else:\n label_value = 1 # target\n\n cv2.polylines(mask, np.array([points], dtype=np.int32), True, label_value, 1)\n cv2.fillPoly(mask, np.array([points], dtype=np.int32), label_value)\n\n return mask, 
comments, is_sentence" }, { "identifier": "REFER", "path": "VisualSearch/utils/refer.py", "snippet": "class REFER:\n def __init__(self, data_root, dataset=\"refcoco\", splitBy=\"unc\"):\n # provide data_root folder which contains refclef, refcoco, refcoco+ and refcocog\n # also provide dataset name and splitBy information\n # e.g., dataset = 'refcoco', splitBy = 'unc'\n print(\"loading dataset %s into memory...\" % dataset)\n self.ROOT_DIR = osp.abspath(osp.dirname(__file__))\n self.DATA_DIR = osp.join(data_root, dataset)\n if dataset in [\"refcoco\", \"refcoco+\", \"refcocog\"]:\n self.IMAGE_DIR = osp.join(data_root, \"images/mscoco/images/train2014\")\n elif dataset == \"refclef\":\n self.IMAGE_DIR = osp.join(data_root, \"images/saiapr_tc-12\")\n else:\n print(\"No refer dataset is called [%s]\" % dataset)\n sys.exit()\n\n self.dataset = dataset\n\n # load refs from data/dataset/refs(dataset).json\n tic = time.time()\n\n ref_file = osp.join(self.DATA_DIR, \"refs(\" + splitBy + \").p\")\n print(\"ref_file: \", ref_file)\n self.data = {}\n self.data[\"dataset\"] = dataset\n self.data[\"refs\"] = pickle.load(open(ref_file, \"rb\"))\n\n # load annotations from data/dataset/instances.json\n instances_file = osp.join(self.DATA_DIR, \"instances.json\")\n instances = json.load(open(instances_file, \"rb\"))\n self.data[\"images\"] = instances[\"images\"]\n self.data[\"annotations\"] = instances[\"annotations\"]\n self.data[\"categories\"] = instances[\"categories\"]\n\n # create index\n self.createIndex()\n print(\"DONE (t=%.2fs)\" % (time.time() - tic))\n\n def createIndex(self):\n # create sets of mapping\n # 1) Refs: \t \t{ref_id: ref}\n # 2) Anns: \t \t{ann_id: ann}\n # 3) Imgs:\t\t \t{image_id: image}\n # 4) Cats: \t \t{category_id: category_name}\n # 5) Sents: \t{sent_id: sent}\n # 6) imgToRefs: \t{image_id: refs}\n # 7) imgToAnns: \t{image_id: anns}\n # 8) refToAnn: \t{ref_id: ann}\n # 9) annToRef: \t{ann_id: ref}\n # 10) catToRefs: \t{category_id: refs}\n # 11) sentToRef: \t{sent_id: ref}\n # 12) sentToTokens: {sent_id: tokens}\n print(\"creating index...\")\n # fetch info from instances\n Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}\n for ann in self.data[\"annotations\"]:\n Anns[ann[\"id\"]] = ann\n imgToAnns[ann[\"image_id\"]] = imgToAnns.get(ann[\"image_id\"], []) + [ann]\n for img in self.data[\"images\"]:\n Imgs[img[\"id\"]] = img\n for cat in self.data[\"categories\"]:\n Cats[cat[\"id\"]] = cat[\"name\"]\n\n # fetch info from refs\n Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}\n Sents, sentToRef, sentToTokens = {}, {}, {}\n for ref in self.data[\"refs\"]:\n # ids\n ref_id = ref[\"ref_id\"]\n ann_id = ref[\"ann_id\"]\n category_id = ref[\"category_id\"]\n image_id = ref[\"image_id\"]\n\n # add mapping related to ref\n Refs[ref_id] = ref\n imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]\n catToRefs[category_id] = catToRefs.get(category_id, []) + [ref]\n refToAnn[ref_id] = Anns[ann_id]\n annToRef[ann_id] = ref\n\n # add mapping of sent\n for sent in ref[\"sentences\"]:\n Sents[sent[\"sent_id\"]] = sent\n sentToRef[sent[\"sent_id\"]] = ref\n sentToTokens[sent[\"sent_id\"]] = sent[\"tokens\"]\n\n # create class members\n self.Refs = Refs\n self.Anns = Anns\n self.Imgs = Imgs\n self.Cats = Cats\n self.Sents = Sents\n self.imgToRefs = imgToRefs\n self.imgToAnns = imgToAnns\n self.refToAnn = refToAnn\n self.annToRef = annToRef\n self.catToRefs = catToRefs\n self.sentToRef = sentToRef\n self.sentToTokens = sentToTokens\n print(\"index created.\")\n\n def 
getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=\"\"):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == len(split) == 0:\n refs = self.data[\"refs\"]\n else:\n if not len(image_ids) == 0:\n refs = [self.imgToRefs[image_id] for image_id in image_ids]\n else:\n refs = self.data[\"refs\"]\n if not len(cat_ids) == 0:\n refs = [ref for ref in refs if ref[\"category_id\"] in cat_ids]\n if not len(ref_ids) == 0:\n refs = [ref for ref in refs if ref[\"ref_id\"] in ref_ids]\n if not len(split) == 0:\n if split in [\"testA\", \"testB\", \"testC\"]:\n refs = [\n ref for ref in refs if split[-1] in ref[\"split\"]\n ] # we also consider testAB, testBC, ...\n elif split in [\"testAB\", \"testBC\", \"testAC\"]:\n refs = [\n ref for ref in refs if ref[\"split\"] == split\n ] # rarely used I guess...\n elif split == \"test\":\n refs = [ref for ref in refs if \"test\" in ref[\"split\"]]\n elif split == \"train\" or split == \"val\":\n refs = [ref for ref in refs if ref[\"split\"] == split]\n else:\n print(\"No such split [%s]\" % split)\n sys.exit()\n ref_ids = [ref[\"ref_id\"] for ref in refs]\n return ref_ids\n\n def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == 0:\n ann_ids = [ann[\"id\"] for ann in self.data[\"annotations\"]]\n else:\n if not len(image_ids) == 0:\n lists = [\n self.imgToAnns[image_id]\n for image_id in image_ids\n if image_id in self.imgToAnns\n ] # list of [anns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.data[\"annotations\"]\n if not len(cat_ids) == 0:\n anns = [ann for ann in anns if ann[\"category_id\"] in cat_ids]\n ann_ids = [ann[\"id\"] for ann in anns]\n if not len(ref_ids) == 0:\n ids = set(ann_ids).intersection(\n set([self.Refs[ref_id][\"ann_id\"] for ref_id in ref_ids])\n )\n return ann_ids\n\n def getImgIds(self, ref_ids=[]):\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if not len(ref_ids) == 0:\n image_ids = list(set([self.Refs[ref_id][\"image_id\"] for ref_id in ref_ids]))\n else:\n image_ids = self.Imgs.keys()\n return image_ids\n\n def getCatIds(self):\n return self.Cats.keys()\n\n def loadRefs(self, ref_ids=[]):\n if type(ref_ids) == list:\n return [self.Refs[ref_id] for ref_id in ref_ids]\n elif type(ref_ids) == int:\n return [self.Refs[ref_ids]]\n\n def loadAnns(self, ann_ids=[]):\n if type(ann_ids) == list:\n return [self.Anns[ann_id] for ann_id in ann_ids]\n elif type(ann_ids) == int or type(ann_ids) == unicode:\n return [self.Anns[ann_ids]]\n\n def loadImgs(self, image_ids=[]):\n if type(image_ids) == list:\n return [self.Imgs[image_id] for image_id in image_ids]\n elif type(image_ids) == int:\n return [self.Imgs[image_ids]]\n\n def loadCats(self, cat_ids=[]):\n if type(cat_ids) == list:\n return [self.Cats[cat_id] for cat_id in cat_ids]\n elif type(cat_ids) == int:\n return [self.Cats[cat_ids]]\n\n def getRefBox(self, ref_id):\n ref = self.Refs[ref_id]\n ann = self.refToAnn[ref_id]\n return ann[\"bbox\"] # [x, y, w, h]\n\n def showRef(self, ref, seg_box=\"seg\"):\n ax = plt.gca()\n # show image\n image = self.Imgs[ref[\"image_id\"]]\n I = 
io.imread(osp.join(self.IMAGE_DIR, image[\"file_name\"]))\n ax.imshow(I)\n # show refer expression\n for sid, sent in enumerate(ref[\"sentences\"]):\n print(\"%s. %s\" % (sid + 1, sent[\"sent\"]))\n # show segmentations\n if seg_box == \"seg\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n polygons = []\n color = []\n c = \"none\"\n if type(ann[\"segmentation\"][0]) == list:\n # polygon used for refcoco*\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((len(seg) / 2, 2))\n polygons.append(Polygon(poly, True, alpha=0.4))\n color.append(c)\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 1, 0, 0),\n linewidths=3,\n alpha=1,\n )\n ax.add_collection(p) # thick yellow polygon\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 0, 0, 0),\n linewidths=1,\n alpha=1,\n )\n ax.add_collection(p) # thin red polygon\n else:\n # mask used for refclef\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n # show bounding-box\n elif seg_box == \"box\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n bbox = self.getRefBox(ref[\"ref_id\"])\n box_plot = Rectangle(\n (bbox[0], bbox[1]),\n bbox[2],\n bbox[3],\n fill=False,\n edgecolor=\"green\",\n linewidth=3,\n )\n ax.add_patch(box_plot)\n\n def getMask(self, ref):\n # return mask, area and mask-center\n ann = self.refToAnn[ref[\"ref_id\"]]\n image = self.Imgs[ref[\"image_id\"]]\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(ann[\"segmentation\"], image[\"height\"], image[\"width\"])\n else:\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n # compute area\n area = sum(mask.area(rle)) # should be close to ann['area']\n return {\"mask\": m, \"area\": area}\n # # position\n # position_x = np.mean(np.where(m==1)[1]) # [1] means columns (matlab style) -> x (c style)\n # position_y = np.mean(np.where(m==1)[0]) # [0] means rows (matlab style) -> y (c style)\n # # mass position (if there were multiple regions, we use the largest one.)\n # label_m = label(m, connectivity=m.ndim)\n # regions = regionprops(label_m)\n # if len(regions) > 0:\n # \tlargest_id = np.argmax(np.array([props.filled_area for props in regions]))\n # \tlargest_props = regions[largest_id]\n # \tmass_y, mass_x = largest_props.centroid\n # else:\n # \tmass_x, mass_y = position_x, position_y\n # # if centroid is not in mask, we find the closest point to it from mask\n # if m[mass_y, mass_x] != 1:\n # \tprint('Finding closes mask point ...')\n # \tkernel = np.ones((10, 10),np.uint8)\n # \tme = cv2.erode(m, kernel, iterations = 1)\n # \tpoints = zip(np.where(me == 1)[0].tolist(), np.where(me == 1)[1].tolist()) # row, col style\n # \tpoints = np.array(points)\n # \tdist = np.sum((points - (mass_y, mass_x))**2, axis=1)\n # \tid = np.argsort(dist)[0]\n # \tmass_y, mass_x = points[id]\n # \t# return\n # return {'mask': m, 'area': area, 'position_x': position_x, 'position_y': position_y, 'mass_x': mass_x, 'mass_y': mass_y}\n # # show image and mask\n # I = io.imread(osp.join(self.IMAGE_DIR, image['file_name']))\n # plt.figure()\n # plt.imshow(I)\n # ax = plt.gca()\n # img = np.ones( (m.shape[0], m.shape[1], 3) )\n # color_mask = np.array([2.0,166.0,101.0])/255\n 
# for i in range(3):\n # img[:,:,i] = color_mask[i]\n # ax.imshow(np.dstack( (img, m*0.5) ))\n # plt.show()\n\n def showMask(self, ref):\n M = self.getMask(ref)\n msk = M[\"mask\"]\n ax = plt.gca()\n ax.imshow(msk)" }, { "identifier": "ReferSegDataset", "path": "VisualSearch/utils/refer_seg_dataset.py", "snippet": "class ReferSegDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n refer_seg_data=\"refclef||refcoco||refcoco+||refcocog\",\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n DATA_DIR = os.path.join(base_dir, \"refer_seg\")\n self.refer_seg_ds_list = refer_seg_data.split(\n \"||\"\n ) # ['refclef', 'refcoco', 'refcoco+', 'refcocog']\n self.refer_seg_data = {}\n for ds in self.refer_seg_ds_list:\n if ds == \"refcocog\":\n splitBy = \"umd\"\n else:\n splitBy = \"unc\"\n\n if ds == \"grefcoco\":\n refer_api = G_REFER(DATA_DIR, ds, splitBy)\n else:\n refer_api = REFER(DATA_DIR, ds, splitBy)\n ref_ids_train = refer_api.getRefIds(split=\"train\")\n images_ids_train = refer_api.getImgIds(ref_ids=ref_ids_train)\n refs_train = refer_api.loadRefs(ref_ids=ref_ids_train)\n\n refer_seg_ds = {}\n refer_seg_ds[\"images\"] = []\n loaded_images = refer_api.loadImgs(image_ids=images_ids_train)\n\n for item in loaded_images:\n item = item.copy()\n if ds == \"refclef\":\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/saiapr_tc-12\", item[\"file_name\"]\n )\n else:\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/mscoco/images/train2014\", item[\"file_name\"]\n )\n refer_seg_ds[\"images\"].append(item)\n refer_seg_ds[\"annotations\"] = refer_api.Anns # anns_train\n\n print(\n \"dataset {} (refs {}) (train split) has {} images and {} annotations.\".format(\n ds,\n splitBy,\n len(refer_seg_ds[\"images\"]),\n len(refer_seg_ds[\"annotations\"]),\n )\n )\n\n img2refs = {}\n for ref in refs_train:\n image_id = ref[\"image_id\"]\n img2refs[image_id] = img2refs.get(image_id, []) + [\n ref,\n ]\n refer_seg_ds[\"img2refs\"] = img2refs\n self.refer_seg_data[ds] = refer_seg_ds\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = random.randint(0, len(self.refer_seg_ds_list) - 1)\n ds = self.refer_seg_ds_list[ds]\n refer_seg_ds = self.refer_seg_data[ds]\n images = refer_seg_ds[\"images\"]\n annotations = refer_seg_ds[\"annotations\"]\n img2refs = refer_seg_ds[\"img2refs\"]\n idx = random.randint(0, len(images) - 1)\n image_info = images[idx]\n image_path = 
image_info[\"file_name\"]\n image_id = image_info[\"id\"]\n refs = img2refs[image_id]\n if len(refs) == 0:\n return self.__getitem__(0)\n\n sents = []\n ann_ids = []\n for ref in refs:\n for sent in ref[\"sentences\"]:\n text = sent[\"sent\"]\n sents.append(text)\n ann_ids.append(ref[\"ann_id\"])\n if len(sents) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(sents))), size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(sents)))\n sampled_sents = np.vectorize(sents.__getitem__)(sampled_inds).tolist()\n # sampled_ann_ids = np.vectorize(ann_ids.__getitem__)(sampled_inds).tolist()\n sampled_ann_ids = [ann_ids[ind] for ind in sampled_inds]\n sampled_classes = sampled_sents\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n for text in sampled_classes:\n text = text.strip()\n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n flag = False\n masks = []\n bboxes_labels = []\n for ann_id in sampled_ann_ids:\n if isinstance(ann_id, list):\n assert False\n flag = True\n if -1 in ann_id:\n assert len(ann_id) == 1\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n else:\n m_final = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n for ann_id_i in ann_id:\n ann = annotations[ann_id_i]\n\n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n else:\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"],\n image_info[\"height\"],\n image_info[\"width\"],\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n m_final = m_final | m\n m = m_final\n masks.append(m)\n continue\n \n ann = annotations[ann_id]\n cur_bboxes = [ann['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if 
len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n \n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n masks.append(m)\n continue\n\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"], image_info[\"height\"], image_info[\"width\"]\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n masks.append(m)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n masks = np.stack(masks, axis=0)\n\n\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "SegDetDataset", "path": "VisualSearch/utils/general_segdet_dataset.py", "snippet": "class SegDetDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n general_segdet_data=\"objects365||cocostuff||paco_lvis\",\n general_segdet_sample_rate=[2,1,1]\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n self.data2list = {}\n self.data2classes = {}\n\n self.general_segdet_datas = general_segdet_data.split(\"||\")\n num_images = []\n for ds in self.general_segdet_datas:\n if ds == \"cocostuff\":\n classes, images, labels, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels, bboxes)\n elif ds == \"objects365\":\n classes, images, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, bboxes)\n else:\n classes, images, labels = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels)\n self.data2classes[ds] = classes\n num_images.append(len(images))\n sample_rate = np.array(general_segdet_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n if \"cocostuff\" in self.general_segdet_datas:\n self.cocostuff_class2index = {\n c: i for i, c in enumerate(self.data2classes[\"cocostuff\"])\n }\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n 
ds = np.random.choice(list(range(len(self.general_segdet_datas))), p=self.sample_rate)\n ds = self.general_segdet_datas[ds]\n\n if ds in [\"paco_lvis\"]:\n class_map = self.data2classes[ds]\n img_ids, coco_api = self.data2list[ds]\n idx = random.randint(0, len(img_ids) - 1)\n img_id = img_ids[idx]\n image_info = coco_api.loadImgs([img_id])[0]\n file_name = image_info[\"file_name\"]\n if ds == \"pascal_part\":\n file_name = os.path.join(\n \"VOCdevkit\", \"VOC2010\", \"JPEGImages\", file_name\n )\n image_path = os.path.join(self.base_dir, \"vlpart\", ds, file_name)\n elif ds == \"paco_lvis\":\n image_path = os.path.join(self.base_dir, \"coco2017\", file_name)\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n annIds = coco_api.getAnnIds(imgIds=image_info[\"id\"])\n anns = coco_api.loadAnns(annIds)\n anns_category2instances = dict()\n for ann in anns:\n category_id = ann['category_id']\n if category_id not in anns_category2instances:\n anns_category2instances[category_id] = []\n anns_category2instances[category_id].append(ann)\n if len(anns_category2instances) == 0:\n return self.__getitem__(0)\n if len(anns_category2instances) >= self.num_classes_per_sample:\n sampled_anns = np.random.choice(\n list(anns_category2instances.keys()), size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_anns = list(anns_category2instances.keys())\n sampled_classes = []\n for category_id in sampled_anns:\n sampled_cls = class_map[category_id]\n if isinstance(sampled_cls, tuple):\n obj, part = sampled_cls\n if random.random() < 0.5:\n name = obj + \" \" + part\n else:\n name = \"the {} of the {}\".format(part, obj)\n else:\n name = sampled_cls\n name = name.replace('_', ' ')\n sampled_classes.append(name)\n\n elif ds in [\"cocostuff\"]:\n image, labels, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n label_path = labels[idx]\n bboxes = bboxes_all[idx]\n label = Image.open(label_path)\n label = np.array(label)\n if ds == \"ade20k\":\n label[label == 0] = 255\n label -= 1\n label[label == 254] = 255\n elif ds == \"cocostuff\":\n for c, i in self.cocostuff_class2index.items():\n if \"-\" in c:\n label[label == i] = 255\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = np.unique(label).tolist()\n if 255 in unique_label:\n unique_label.remove(255)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n elif ds in 
['objects365']:\n image, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n bboxes = bboxes_all[idx]\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = set()\n for bbox_info in bboxes:\n unique_label.add(bbox_info['category_id'])\n unique_label = list(unique_label)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n\n questions = []\n answers = []\n class_ids = []\n bboxes_labels = []\n for i, sampled_cls in enumerate(sampled_classes):\n text = sampled_cls\n if ds in ['objects365']:\n text = random.sample(text.split('/'), 1)[0]\n \n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n\n answers.append(random.choice(self.answer_list))\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n category_id = sampled_anns[i]\n cur_bboxes = [instance['bbox'] for instance in anns_category2instances[category_id]]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n continue\n\n class_id = self.data2classes[ds].tolist().index(sampled_cls)\n class_ids.append(class_id)\n if ds in ['objects365']:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id'] == class_id]\n else:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id']-1 == class_id]\n cur_bboxes = cur_bboxes[:100]\n assert len(cur_bboxes) > 0\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n 
conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n masks = []\n for category_id in sampled_anns:\n try:\n cur_anns = anns_category2instances[category_id]\n cur_mask = None\n for ann in cur_anns:\n if cur_mask is None:\n cur_mask = coco_api.annToMask(ann)\n else:\n cur_mask = cur_mask | coco_api.annToMask(ann)\n assert cur_mask is not None\n masks.append(cur_mask)\n except Exception as e:\n print(e)\n return self.__getitem__(0)\n\n masks = np.stack(masks, axis=0)\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n elif ds in ['objects365']:\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n masks_valid = [0]*len(bboxes_labels)\n else:\n label = torch.from_numpy(label).long()\n masks = []\n for class_id in class_ids:\n masks.append(label == class_id)\n masks = torch.stack(masks, dim=0)\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "MixedGroundingDataset", "path": "VisualSearch/utils/mixed_grounding_dataset.py", "snippet": "class MixedGroundingDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n ):\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n with open(os.path.join(base_dir, 'MixedGrounding', 'goldG_train.json')) as f:\n self.images = json.load(f)\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n\n idx = random.randint(0, len(self.images) - 1)\n image_info = self.images[idx]\n image_data_source = image_info['data_source']\n file_name = image_info[\"file_name\"]\n assert image_data_source in ['coco', 'vg', 'flickr']\n if image_data_source == 'coco':\n image_path = os.path.join(self.base_dir, 'coco2014/train2014', file_name)\n elif image_data_source == 'vg':\n image_path = os.path.join(self.base_dir, 'MixedGrounding/GQA/images', file_name)\n else:\n image_path = os.path.join(self.base_dir, 'MixedGrounding/flickr30k-images', file_name)\n caption = image_info['caption']\n instances = image_info['instances']\n if len(instances) == 0:\n return self.__getitem__(0)\n\n if len(instances) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(instances))), 
size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(instances)))\n\n sampled_classes = sampled_inds\n \n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n bboxes_labels = []\n for sample_ind in sampled_inds:\n text = []\n tokens_positive = instances[sample_ind]['tokens_positive']\n for token in tokens_positive:\n text.append(caption[token[0]:token[1]])\n text = \" \".join(text)\n text = text.strip()\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n cur_bboxes = [instances[sample_ind]['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n \n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [0]*len(bboxes_labels)\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "VQADataset", "path": "VisualSearch/utils/vqa_dataset.py", "snippet": "class VQADataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_image_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n vqa_data=\"possible_locations_conv_86k||llava_instruct_150k\",\n vqa_sample_rate=[2,1],\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_image_dir = base_image_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n DATA_DIR = os.path.join(base_image_dir, \"vsm_vqa_data\")\n self.vqa_image_root = os.path.join(base_image_dir, \"coco2017/train2017\")\n 
vqa_datas = vqa_data.split(\"||\")\n self.vqa_datas = []\n for data in vqa_datas:\n with open(os.path.join(DATA_DIR, \"{}.json\".format(data))) as f:\n data = json.load(f)\n self.vqa_datas.append(data)\n sample_rate = np.array(vqa_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = np.random.choice(list(range(len(self.vqa_datas))), p=self.sample_rate)\n ds = self.vqa_datas[ds]\n idx = random.randint(0, len(ds) - 1)\n item = ds[idx]\n image_path = os.path.join(self.vqa_image_root, item[\"image\"])\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n ori_size = image.shape[:2]\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n conv = conversation_lib.default_conversation.copy()\n source = item[\"conversations\"]\n source = preprocess_multimodal(\n copy.deepcopy(source),\n mm_use_im_start_end=conv.sep_style == conversation_lib.SeparatorStyle.TWO,\n )\n roles = {\"human\": conv.roles[0], \"gpt\": conv.roles[1]}\n conversations = []\n if roles[source[0][\"from\"]] != conv.roles[0]:\n # Skip the first one if it is not from human\n source = source[1:]\n conv.messages = []\n for j, sentence in enumerate(source):\n role = roles[sentence[\"from\"]]\n assert role == conv.roles[j % 2], f\"{j}\"\n conv.append_message(role, sentence[\"value\"])\n conversations.append(conv.get_prompt())\n\n questions = conversations\n sampled_classes = conversations\n\n masks = torch.rand(1, *ori_size)\n label = torch.ones(ori_size) * self.ignore_label\n bboxes_labels = [torch.tensor([[0.5,0.5,1.0,1.0]])]\n bboxes_valid = [0]\n masks_valid = [0]\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "box_xyxy_to_cxcywh", "path": "VisualSearch/utils/utils.py", "snippet": "def box_xyxy_to_cxcywh(x):\n x0, y0, x1, y1 = x.unbind(-1)\n b = [(x0 + x1) / 2, (y0 + y1) / 2,\n (x1 - x0), (y1 - y0)]\n return torch.stack(b, dim=-1)" }, { "identifier": "expand2square", "path": "VisualSearch/utils/utils.py", "snippet": "def expand2square(pil_img, background_color):\n width, height = pil_img.size\n if width == height:\n return pil_img\n elif width > height:\n result = Image.new(pil_img.mode, (width, width), background_color)\n result.paste(pil_img, (0, 0))\n return result\n else:\n result = Image.new(pil_img.mode, (height, height), background_color)\n result.paste(pil_img, (0, 
0))\n return result" } ]
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
14,499
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [ tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversation_list ] input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id ) attention_masks = input_ids.ne(tokenizer.pad_token_id) for i in range(len(bboxes_valid_list)): bboxes_valid = bboxes_valid_list[i] attention_mask = attention_masks[i] if not bboxes_valid: attention_mask = attention_mask & input_ids[i].ne(tokenizer("[LOC]", add_special_tokens=False).input_ids[0]) attention_masks[i] = attention_mask
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [ tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversation_list ] input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id ) attention_masks = input_ids.ne(tokenizer.pad_token_id) for i in range(len(bboxes_valid_list)): bboxes_valid = bboxes_valid_list[i] attention_mask = attention_masks[i] if not bboxes_valid: attention_mask = attention_mask & input_ids[i].ne(tokenizer("[LOC]", add_special_tokens=False).input_ids[0]) attention_masks[i] = attention_mask
conv = conversation_lib.default_conversation.copy()
11
2023-12-15 14:58:24+00:00
24k
sinoyou/nelf-pro
nerfstudio/data/dataparsers/nelfpro_dataparser.py
[ { "identifier": "plot_point3d", "path": "nerfstudio/utils/plotly_utils_nelfpro.py", "snippet": "def plot_point3d(xyz, color):\n point_cloud_size = 0.8\n cam_centers_go = go.Scatter3d(\n x = xyz[:, 0], \n y = xyz[:, 1], \n z = xyz[:, 2], \n mode=\"markers\",\n name=\"sparse point clouds\",\n marker=dict(size=point_cloud_size, color=color),\n )\n return [cam_centers_go]" }, { "identifier": "FactorPoseGenerator", "path": "nerfstudio/data/utils/probe_sample.py", "snippet": "class FactorPoseGenerator:\n def __init__(self, strategy, return_type='index'):\n self.strategy = strategy\n self.return_type = return_type\n\n def sample(self, positions: Union[TensorType['num_cam', 3, 4], TensorType['num_cam', 3]], num_samples: int, **kwargs):\n assert positions.shape[0] >= num_samples, f'Number of cameras ({positions.shape[0]}) must be >= number of samples ({num_samples})'\n\n if positions.shape[-2:] == (3, 4):\n positions = positions[:, :3, 3]\n\n if self.return_type == 'index':\n if self.strategy == 'random':\n return self._sample_random(positions, num_samples, **kwargs)\n elif self.strategy == 'fps':\n return self._sample_fps(positions, num_samples, **kwargs)\n else:\n raise NotImplementedError(f'Camera sampling strategy {self.strategy} not implemented for return type {self.return_type}')\n elif self.return_type == 'position':\n if self.strategy == 'kmeans':\n return self._sample_kmeans(positions, num_samples, **kwargs)\n else:\n raise NotImplementedError(f'Camera sampling strategy {self.strategy} not implemented for return type {self.return_type}')\n else:\n raise NotImplementedError(f'Unknown return type {self.return_type}')\n \n @staticmethod\n def get_random_offset(shape: torch.Size, scale: float=0.1, seed: int=1737) -> torch.Tensor:\n torch.manual_seed(seed=seed)\n return torch.randn(shape) * scale\n \n def _sample_random(self, positions, num_samples):\n np.random.seed(1737)\n indices = np.random.choice(range(positions.shape[0]), size=num_samples, replace=False)\n return indices\n\n def _sample_fps(self, positions, num_samples, return_order=False):\n \"\"\"Iteratively remove points (views) with the minimum distance to its closest neighbor.\"\"\"\n mink = 1\n if isinstance(positions, np.ndarray):\n positions = torch.from_numpy(positions)\n\n points = positions\n n = len(points)\n if n < num_samples:\n print(\"INVALID. 
n_points_to_sample must be smaller than points length!\")\n return None\n else:\n current_subset = len(points)\n\n # compute pairwise distances\n A = points.unsqueeze(0) # 1 x n x 3\n B = points.unsqueeze(1) # n x 1 x 3\n pairwise_distances_grid = torch.norm(A - B, dim=-1) # n x n\n max_distance = pairwise_distances_grid.max()\n\n # distances on the diagonal are zero, set them to the maximum\n pairwise_distances_grid[pairwise_distances_grid == 0.0] = max_distance\n\n removal_order = []\n while current_subset != num_samples:\n # flat_index = torch.argmin(pairwise_distances_grid, keepdim=True)\n # min_y = torch.div(flat_index, n, rounding_mode=\"trunc\")\n partitionk = mink if mink > 1 else 2\n mink_vals, mink_idx = torch.topk(pairwise_distances_grid, partitionk, largest=False, dim=0)\n minavg_vals = mink_vals.mean(dim=0) if mink > 1 else mink_vals\n if (minavg_vals == np.inf).all():\n minavg_vals, mink_idx = torch.topk(pairwise_distances_grid, 1, largest=False, dim=0)\n min_y = torch.argmin(minavg_vals, keepdim=True)\n\n # check for a better order between A=(x,min_y) and B=(min_y,x) and their second closest points\n if mink == 1:\n x = mink_idx[0, min_y]\n A = mink_vals[0, min_y]\n B = mink_vals[0, x]\n assert A == B\n\n if mink_vals[1, min_y] > mink_vals[1, x]:\n min_y = x\n\n pairwise_distances_grid[:, min_y] = np.inf\n pairwise_distances_grid[min_y, :] = np.inf\n removal_order.append(min_y.item())\n current_subset -= 1\n\n mask = pairwise_distances_grid != np.inf\n\n select_index = torch.nonzero(torch.sum(mask, dim=0)).squeeze().numpy()\n\n if not return_order:\n return select_index\n else:\n return select_index, removal_order\n \n def _sample_kmeans(self, positions, num_samples):\n kmeans = KMeans(n_clusters=num_samples, random_state=1737, init='k-means++').fit(positions)\n return torch.from_numpy(kmeans.cluster_centers_)" }, { "identifier": "camera_utils", "path": "nerfstudio/cameras/camera_utils.py", "snippet": "_EPS = np.finfo(float).eps * 4.0\n M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]\n K = np.array(\n [\n [m00 - m11 - m22, 0.0, 0.0, 0.0],\n [m01 + m10, m11 - m00 - m22, 0.0, 0.0],\n [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],\n [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],\n ]\n )\ndef unit_vector(data, axis: Optional[int] = None) -> np.ndarray:\ndef quaternion_from_matrix(matrix, isprecise: bool = False) -> np.ndarray:\ndef quaternion_slerp(quat0, quat1, fraction: float, spin: int = 0, shortestpath: bool = True) -> np.ndarray:\ndef quaternion_matrix(quaternion) -> np.ndarray:\ndef get_interpolated_poses(pose_a, pose_b, steps: int = 10) -> List[float]:\ndef get_interpolated_k(k_a, k_b, steps: int = 10) -> TensorType[3, 4]:\ndef get_interpolated_poses_many(\n poses: TensorType[\"num_poses\", 3, 4],\n Ks: TensorType[\"num_poses\", 3, 3],\n steps_per_transition=10,\n) -> Tuple[TensorType[\"num_poses\", 3, 4], TensorType[\"num_poses\", 3, 3]]:\ndef normalize(x) -> TensorType[...]:\ndef viewmatrix(lookat, up, pos) -> TensorType[...]:\ndef get_distortion_params(\n k1: float = 0.0,\n k2: float = 0.0,\n k3: float = 0.0,\n k4: float = 0.0,\n p1: float = 0.0,\n p2: float = 0.0,\n) -> TensorType[...]:\ndef _compute_residual_and_jacobian(\n x: torch.Tensor,\n y: torch.Tensor,\n xd: torch.Tensor,\n yd: torch.Tensor,\n distortion_params: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,]:\ndef radial_and_tangential_undistort(\n coords: torch.Tensor,\n distortion_params: torch.Tensor,\n eps: float = 1e-3,\n 
max_iterations: int = 10,\n) -> torch.Tensor:\ndef rotation_matrix(a: TensorType[3], b: TensorType[3]) -> TensorType[3, 3]:\ndef auto_orient_and_center_poses(\n poses: TensorType[\"num_poses\":..., 4, 4], method: Literal[\"pca\", \"up\", \"none\"] = \"up\", center_poses: bool = True\n) -> TensorType[\"num_poses\":..., 3, 4]:" }, { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras\n down the line in cases where your batches of camera data don't come from the same cameras.\n\n If a single value is provided, it is broadcasted to all cameras.\n\n Args:\n camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format\n fx: Focal length x\n fy: Focal length y\n cx: Principal point x\n cy: Principal point y\n width: Image width\n height: Image height\n distortion_params: OpenCV 6 radial distortion coefficients\n camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.\n times: Timestamps for each camera\n probe_config: dict config containing the generated probe information (core and basis)\n \"\"\"\n\n camera_to_worlds: TensorType[\"num_cameras\":..., 3, 4]\n fx: TensorType[\"num_cameras\":..., 1]\n fy: TensorType[\"num_cameras\":..., 1]\n cx: TensorType[\"num_cameras\":..., 1]\n cy: TensorType[\"num_cameras\":..., 1]\n width: TensorType[\"num_cameras\":..., 1]\n height: TensorType[\"num_cameras\":..., 1]\n distortion_params: Optional[TensorType[\"num_cameras\":..., 6]]\n camera_type: TensorType[\"num_cameras\":..., 1]\n times: Optional[TensorType[\"num_cameras\":..., 1]]\n image_filenames: Optional[List[str]]\n probe_config: Optional[list]\n\n def __init__(\n self,\n camera_to_worlds: TensorType[\"batch_c2ws\":..., 3, 4],\n fx: Union[TensorType[\"batch_fxs\":..., 1], float],\n fy: Union[TensorType[\"batch_fys\":..., 1], float],\n cx: Union[TensorType[\"batch_cxs\":..., 1], float],\n cy: Union[TensorType[\"batch_cys\":..., 1], float],\n width: Optional[Union[TensorType[\"batch_ws\":..., 1], int]] = None,\n height: Optional[Union[TensorType[\"batch_hs\":..., 1], int]] = None,\n distortion_params: Optional[TensorType[\"batch_dist_params\":..., 6]] = None,\n camera_type: Optional[\n Union[\n TensorType[\"batch_cam_types\":..., 1],\n int,\n List[CameraType],\n CameraType,\n ]\n ] = CameraType.PERSPECTIVE,\n times: Optional[TensorType[\"num_cameras\"]] = None,\n image_filenames: Optional[List[str]] = None,\n probe_config: Optional[list] = None\n ):\n \"\"\"Initializes the Cameras object.\n\n Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]\n (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or\n TensorType[1] (in the case of the rest of the elements). The dimensions before that are\n considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast\n all the tensors to be the same batch dimension. This means you can use any combination of the\n input types in the function signature and it won't break. 
Your batch size for all tensors\n must be broadcastable to the same size, and the resulting number of batch dimensions will be\n the batch dimension with the largest number of dimensions.\n \"\"\"\n\n # This will notify the tensordataclass that we have a field with more than 1 dimension\n self._field_custom_dimensions = {\"camera_to_worlds\": 2}\n\n self.camera_to_worlds = camera_to_worlds\n\n # fx fy calculation\n self.fx = self._init_get_fc_xy(fx, \"fx\") # @dataclass's post_init will take care of broadcasting\n self.fy = self._init_get_fc_xy(fy, \"fy\") # @dataclass's post_init will take care of broadcasting\n\n # cx cy calculation\n self.cx = self._init_get_fc_xy(cx, \"cx\") # @dataclass's post_init will take care of broadcasting\n self.cy = self._init_get_fc_xy(cy, \"cy\") # @dataclass's post_init will take care of broadcasting\n\n # Distortion Params Calculation:\n self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting\n\n # @dataclass's post_init will take care of broadcasting\n self.height = self._init_get_height_width(height, self.cy)\n self.width = self._init_get_height_width(width, self.cx)\n self.camera_type = self._init_get_camera_type(camera_type)\n self.times = self._init_get_times(times)\n \n self.image_filenames = image_filenames\n self.probe_config = probe_config\n if self.probe_config is not None:\n self.probe = Probes(self.camera_to_worlds, self.probe_config)\n else:\n self.probe = None\n \n self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors\n\n def _init_get_fc_xy(self, fc_xy, name):\n \"\"\"\n Parses the input focal length / principle point x or y and returns a tensor of the correct shape\n\n Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we\n just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.\n\n Args:\n fc_xy: The focal length / principle point x or y\n name: The name of the variable. 
Used for error messages\n \"\"\"\n if isinstance(fc_xy, float):\n fc_xy = torch.Tensor([fc_xy], device=self.device)\n elif isinstance(fc_xy, torch.Tensor):\n if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:\n fc_xy = fc_xy.unsqueeze(-1)\n fc_xy = fc_xy.to(self.device)\n else:\n raise ValueError(f\"{name} must be a float or tensor, got {type(fc_xy)}\")\n return fc_xy\n\n def _init_get_camera_type(\n self,\n camera_type: Union[\n TensorType[\"batch_cam_types\":..., 1], TensorType[\"batch_cam_types\":...], int, List[CameraType], CameraType\n ],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument camera_type\n\n Camera Type Calculation:\n If CameraType, convert to int and then to tensor, then broadcast to all cameras\n If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n\n Args:\n camera_type: camera_type argument from __init__()\n \"\"\"\n if isinstance(camera_type, CameraType):\n camera_type = torch.tensor([camera_type.value], device=self.device)\n elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):\n camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)\n elif isinstance(camera_type, int):\n camera_type = torch.tensor([camera_type], device=self.device)\n elif isinstance(camera_type, torch.Tensor):\n assert not torch.is_floating_point(\n camera_type\n ), f\"camera_type tensor must be of type int, not: {camera_type.dtype}\"\n camera_type = camera_type.to(self.device)\n if camera_type.ndim == 0 or camera_type.shape[-1] != 1:\n camera_type = camera_type.unsqueeze(-1)\n # assert torch.all(\n # camera_type.view(-1)[0] == camera_type\n # ), \"Batched cameras of different camera_types will be allowed in the future.\"\n else:\n raise ValueError(\n 'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor[\"num_cameras\"]. 
\\\n Received: '\n + str(type(camera_type))\n )\n return camera_type\n\n def _init_get_height_width(\n self,\n h_w: Union[TensorType[\"batch_hws\":..., 1], TensorType[\"batch_hws\":...], int, None],\n c_x_y: TensorType[\"batch_cxys\":...],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument for height or width\n\n Height/Width Calculation:\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n If none, use cx or cy * 2\n Else raise error\n\n Args:\n h_w: height or width argument from __init__()\n c_x_y: cx or cy for when h_w == None\n \"\"\"\n if isinstance(h_w, int):\n h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)\n elif isinstance(h_w, torch.Tensor):\n assert not torch.is_floating_point(h_w), f\"height and width tensor must be of type int, not: {h_w.dtype}\"\n h_w = h_w.to(torch.int64).to(self.device)\n if h_w.ndim == 0 or h_w.shape[-1] != 1:\n h_w = h_w.unsqueeze(-1)\n # assert torch.all(h_w == h_w.view(-1)[0]), \"Batched cameras of different h, w will be allowed in the future.\"\n elif h_w is None:\n h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))\n else:\n raise ValueError(\"Height must be an int, tensor, or None, received: \" + str(type(h_w)))\n return h_w\n\n def _init_get_times(self, times):\n if times is None:\n times = None\n elif isinstance(times, torch.Tensor):\n if times.ndim == 0 or times.shape[-1] != 1:\n times = times.unsqueeze(-1).to(self.device)\n else:\n raise ValueError(f\"times must be None or a tensor, got {type(times)}\")\n\n return times\n\n @property\n def device(self):\n \"\"\"Returns the device that the camera is on.\"\"\"\n return self.camera_to_worlds.device\n\n @property\n def image_height(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.height\n\n @property\n def image_width(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.width\n\n @property\n def is_jagged(self):\n \"\"\"\n Returns whether or not the cameras are \"jagged\" (i.e. the height and widths are different, meaning that\n you cannot concatenate the image coordinate maps together)\n \"\"\"\n h_jagged = not torch.all(self.height == self.height.view(-1)[0])\n w_jagged = not torch.all(self.width == self.width.view(-1)[0])\n return h_jagged or w_jagged\n\n def get_image_coords(\n self, pixel_offset: float = 0.5, index: Optional[Tuple] = None\n ) -> TensorType[\"height\", \"width\", 2]:\n \"\"\"This gets the image coordinates of one of the cameras in this object.\n\n If no index is specified, it will return the maximum possible sized height / width image coordinate map,\n by looking at the maximum height and width of all the cameras in this object.\n\n Args:\n pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)\n index: Tuple of indices into the batch dimensions of the camera. 
Defaults to None, which returns the 0th\n flattened camera\n\n Returns:\n Grid of image coordinates.\n \"\"\"\n if index is None:\n image_height = torch.max(self.image_height.view(-1))\n image_width = torch.max(self.image_width.view(-1))\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n else:\n image_height = self.image_height[index].item()\n image_width = self.image_width[index].item()\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n return image_coords\n\n def generate_rays( # pylint: disable=too-many-statements\n self,\n camera_indices: Union[TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"], int],\n coords: Optional[TensorType[\"num_rays\":..., 2]] = None,\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n keep_shape: Optional[bool] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices.\n\n This function will standardize the input arguments and then call the _generate_rays_from_coords function\n to generate the rays. Our goal is to parse the arguments and then get them into the right shape:\n - camera_indices: (num_rays:..., num_cameras_batch_dims)\n - coords: (num_rays:..., 2)\n - camera_opt_to_camera: (num_rays:..., 3, 4) or None\n - distortion_params_delta: (num_rays:..., 6) or None\n\n Read the docstring for _generate_rays_from_coords for more information on how we generate the rays\n after we have standardized the arguments.\n\n We are only concerned about different combinations of camera_indices and coords matrices, and the following\n are the 4 cases we have to deal with:\n 1. isinstance(camera_indices, int) and coords == None\n - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)\n 2. isinstance(camera_indices, int) and coords != None\n - In this case, we broadcast camera_indices to the same batch dim as coords\n 3. not isinstance(camera_indices, int) and coords == None\n - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast\n all our other args to match the new definition of num_rays := (h, w) + num_rays\n 4. not isinstance(camera_indices, int) and coords != None\n - In this case, we have nothing to do, only check that the arguments are of the correct shape\n\n There is one more edge case we need to be careful with: when we have \"jagged cameras\" (ie: different heights\n and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.\n When coords == None (ie: when we render out the whole image associated with this camera), we run into problems\n since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,\n we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,\n regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.\n\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n coords: Coordinates of the pixels to generate rays for. 
If None, the full image will be rendered.\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n distortion_params_delta: Optional delta for the distortion parameters.\n keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise\n keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the\n camera_indices and coords tensors (if we can).\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords.\n \"\"\"\n # Check the argument types to make sure they're valid and all shaped correctly\n assert isinstance(camera_indices, (torch.Tensor, int)), \"camera_indices must be a tensor or int\"\n assert coords is None or isinstance(coords, torch.Tensor), \"coords must be a tensor or None\"\n assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)\n assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)\n if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):\n num_rays_shape = camera_indices.shape[:-1]\n errormsg = \"Batch dims of inputs must match when inputs are all tensors\"\n assert coords.shape[:-1] == num_rays_shape, errormsg\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg\n\n # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later\n if not self.shape:\n cameras = self.reshape((1,))\n assert torch.all(\n torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0\n ), \"Can only index into single camera with no batch dimensions if index is zero\"\n else:\n cameras = self\n\n # If the camera indices are an int, then we need to make sure that the camera batch is 1D\n if isinstance(camera_indices, int):\n assert (\n len(cameras.shape) == 1\n ), \"camera_indices must be a tensor if cameras are batched with more than 1 batch dimension\"\n camera_indices = torch.tensor([camera_indices], device=cameras.device)\n\n assert camera_indices.shape[-1] == len(\n cameras.shape\n ), \"camera_indices must have shape (num_rays:..., num_cameras_batch_dims)\"\n\n # If keep_shape is True, then we need to make sure that the camera indices in question\n # are all the same height and width and can actually be batched while maintaining the image\n # shape\n if keep_shape is True:\n assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(\n cameras.width[camera_indices] == cameras.width[camera_indices[0]]\n ), \"Can only keep shape if all cameras have the same height and width\"\n\n # If the cameras don't all have same height / width, if coords is not none, we will need to generate\n # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.\n # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial\n if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):\n index_dim = camera_indices.shape[-1]\n camera_indices = camera_indices.reshape(-1, index_dim)\n _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]\n camera_indices = torch.cat(\n [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],\n 
)\n coords = torch.cat(_coords, dim=0)\n assert coords.shape[0] == camera_indices.shape[0]\n # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them\n\n # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords\n # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,\n # each image in camera_indices has to have the same shape since otherwise we would have error'd when\n # we checked keep_shape is valid or we aren't jagged.\n if coords is None:\n index_dim = camera_indices.shape[-1]\n index = camera_indices.reshape(-1, index_dim)[0]\n coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)\n coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)\n coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)\n camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None\n camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))\n if camera_opt_to_camera is not None\n else None\n )\n distortion_params_delta = ( # (h, w, num_rays, 6) or None\n distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))\n if distortion_params_delta is not None\n else None\n )\n\n # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims\n camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)\n\n # Checking our tensors have been standardized\n assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)\n assert camera_indices.shape[-1] == len(cameras.shape)\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]\n\n # This will do the actual work of generating the rays now that we have standardized the inputs\n # raybundle.shape == (num_rays) when done\n # pylint: disable=protected-access\n raybundle = cameras._generate_rays_from_coords(\n camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion\n )\n\n # If we have mandated that we don't keep the shape, then we flatten\n if keep_shape is False:\n raybundle = raybundle.flatten()\n\n # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,\n # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour\n # that we haven't caught yet with tests\n return raybundle\n\n # pylint: disable=too-many-statements\n def _generate_rays_from_coords(\n self,\n camera_indices: TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"],\n coords: TensorType[\"num_rays\":..., 2],\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices and coords where self isn't jagged\n\n This is a fairly complex function, so let's break this down slowly.\n\n Shapes involved:\n - num_rays: This is your output raybundle shape. 
It dictates the number and shape of the rays generated\n - num_cameras_batch_dims: This is the number of dimensions of our camera\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n The shape of this is such that indexing into camera_indices[\"num_rays\":...] will return the\n index into each batch dimension of the camera in order to get the correct camera specified by\n \"num_rays\".\n Example:\n >>> cameras = Cameras(...)\n >>> cameras.shape\n (2, 3, 4)\n >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3\n >>> camera_indices.shape\n (3,)\n >>> coords = torch.tensor([1,1])\n >>> coords.shape\n (2,)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()\n >>> out_rays.shape\n ()\n >>> camera_indices = torch.tensor([[0,0,0]])\n >>> camera_indices.shape\n (1, 3)\n >>> coords = torch.tensor([[1,1]])\n >>> coords.shape\n (1, 2)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)\n # since we added an extra dimension in front of camera_indices\n >>> out_rays.shape\n (1,)\n\n If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape\n\n The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the\n output shape and if you index into the output RayBundle with some indices [i:...], if you index into\n camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch\n indices into the original cameras object corresponding to that ray (ie: you will get the camera\n from our batched cameras corresponding to the ray at RayBundle[i:...]).\n\n coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning\n height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will\n get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].\n\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].\n\n distortion_params_delta: Optional delta for the distortion parameters.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].\n\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords. 
RayBundle.shape == num_rays\n \"\"\"\n # Make sure we're on the right devices\n camera_indices = camera_indices.to(self.device)\n coords = coords.to(self.device)\n\n # Checking to make sure everything is of the right shape and type\n num_rays_shape = camera_indices.shape[:-1]\n assert camera_indices.shape == num_rays_shape + (self.ndim,)\n assert coords.shape == num_rays_shape + (2,)\n assert coords.shape[-1] == 2\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)\n assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)\n\n # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all\n # of our output rays at each dimension of our cameras object\n true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]\n\n # Get all our focal lengths, principal points and make sure they are the right shapes\n y = coords[..., 0] # (num_rays,) get rid of the last dimension\n x = coords[..., 1] # (num_rays,) get rid of the last dimension\n fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1) # (num_rays,)\n cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1) # (num_rays,)\n assert (\n y.shape == num_rays_shape\n and x.shape == num_rays_shape\n and fx.shape == num_rays_shape\n and fy.shape == num_rays_shape\n and cx.shape == num_rays_shape\n and cy.shape == num_rays_shape\n ), (\n str(num_rays_shape)\n + str(y.shape)\n + str(x.shape)\n + str(fx.shape)\n + str(fy.shape)\n + str(cx.shape)\n + str(cy.shape)\n )\n\n # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)\n # Also make sure the shapes are correct\n coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1) # (num_rays, 2)\n assert (\n coord.shape == num_rays_shape + (2,)\n and coord_x_offset.shape == num_rays_shape + (2,)\n and coord_y_offset.shape == num_rays_shape + (2,)\n )\n\n # Stack image coordinates and image coordinates offset by 1, check shapes too\n coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0) # (3, num_rays, 2)\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Undistorts our images according to our distortion parameters\n if not disable_distortion:\n distortion_params = None\n if self.distortion_params is not None:\n distortion_params = self.distortion_params[true_indices]\n if distortion_params_delta is not None:\n distortion_params = distortion_params + distortion_params_delta\n elif distortion_params_delta is not None:\n distortion_params = distortion_params_delta\n\n # Do not apply distortion for equirectangular images\n if distortion_params is not None:\n mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n coord_mask = torch.stack([mask, mask, mask], dim=0)\n if mask.any():\n coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(\n coord_stack[coord_mask, :].reshape(3, -1, 2),\n distortion_params[mask, :],\n ).reshape(-1, 2)\n\n # Make sure after we have undistorted our images, the shapes are still correct\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Gets our directions for all our rays in camera coordinates and checks shapes at the end\n # Here, directions_stack is of shape (3, 
num_rays, 3)\n # directions_stack[0] is the direction for ray in camera coordinates\n # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x\n # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y\n cam_types = torch.unique(self.camera_type, sorted=False)\n directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)\n if CameraType.PERSPECTIVE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()\n directions_stack[..., 2][mask] = -1.0\n\n if CameraType.FISHEYE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))\n theta = torch.clip(theta, 0.0, math.pi)\n\n sin_theta = torch.sin(theta)\n\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()\n directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)\n\n if CameraType.EQUIRECTANGULAR.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n # For equirect, fx = fy = height = width/2\n # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2\n theta = -torch.pi * coord_stack[..., 0] # minus sign for right-handed\n phi = torch.pi * (0.5 - coord_stack[..., 1])\n # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)\n directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()\n directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()\n\n for value in cam_types:\n if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:\n raise ValueError(f\"Camera type {value} not supported.\")\n\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n c2w = self.camera_to_worlds[true_indices]\n assert c2w.shape == num_rays_shape + (3, 4)\n\n if camera_opt_to_camera is not None:\n c2w = pose_utils.multiply(c2w, camera_opt_to_camera)\n rotation = c2w[..., :3, :3] # (..., 3, 3)\n assert rotation.shape == num_rays_shape + (3, 3)\n\n directions_stack = torch.sum(\n directions_stack[..., None, :] * rotation, dim=-1\n ) # (..., 1, 3) * (..., 3, 3) -> (..., 3)\n\n directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)\n directions_norm = directions_norm[0]\n\n directions_stack = normalize(directions_stack, dim=-1)\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n origins = c2w[..., :3, 3] # (..., 3)\n assert origins.shape == num_rays_shape + (3,)\n\n directions = directions_stack[0]\n assert directions.shape == num_rays_shape + (3,)\n\n # norms of the vector going between adjacent coords, giving us dx and dy per output ray\n dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1)) # (\"num_rays\":...,)\n dy = 
torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1)) # (\"num_rays\":...,)\n assert dx.shape == num_rays_shape and dy.shape == num_rays_shape\n\n pixel_area = (dx * dy)[..., None] # (\"num_rays\":..., 1)\n assert pixel_area.shape == num_rays_shape + (1,)\n\n times = self.times[camera_indices, 0] if self.times is not None else None\n\n\n return RayBundle(\n origins=origins,\n directions=directions,\n pixel_area=pixel_area,\n camera_indices=camera_indices,\n directions_norm=directions_norm,\n times=times,\n probes=self.probe,\n )\n\n def to_json(\n self, camera_idx: int, image: Optional[TensorType[\"height\", \"width\", 2]] = None, max_size: Optional[int] = None\n ) -> Dict:\n \"\"\"Convert a camera to a json dictionary.\n\n Args:\n camera_idx: Index of the camera to convert.\n image: An image in range [0, 1] that is encoded to a base64 string.\n max_size: Max size to resize the image to if present.\n\n Returns:\n A JSON representation of the camera\n \"\"\"\n flattened = self.flatten()\n json_ = {\n \"type\": \"PinholeCamera\",\n \"cx\": flattened[camera_idx].cx.item(),\n \"cy\": flattened[camera_idx].cy.item(),\n \"fx\": flattened[camera_idx].fx.item(),\n \"fy\": flattened[camera_idx].fy.item(),\n \"camera_to_world\": self.camera_to_worlds[camera_idx].tolist(),\n \"camera_index\": camera_idx,\n \"times\": flattened[camera_idx].times.item() if self.times is not None else None,\n }\n if image is not None:\n image_uint8 = (image * 255).detach().type(torch.uint8)\n if max_size is not None:\n image_uint8 = image_uint8.permute(2, 0, 1)\n image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size) # type: ignore\n image_uint8 = image_uint8.permute(1, 2, 0)\n image_uint8 = image_uint8.cpu().numpy()\n data = cv2.imencode(\".jpg\", image_uint8)[1].tobytes()\n json_[\"image\"] = str(\"data:image/jpeg;base64,\" + base64.b64encode(data).decode(\"ascii\"))\n return json_\n\n def get_intrinsics_matrices(self) -> TensorType[\"num_cameras\":..., 3, 3]:\n \"\"\"Returns the intrinsic matrices for each camera.\n\n Returns:\n Pinhole camera intrinsics matrices\n \"\"\"\n K = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)\n K[..., 0, 0] = self.fx.squeeze(-1)\n K[..., 1, 1] = self.fy.squeeze(-1)\n K[..., 0, 2] = self.cx.squeeze(-1)\n K[..., 1, 2] = self.cy.squeeze(-1)\n K[..., 2, 2] = 1.0\n return K\n\n def rescale_output_resolution(\n self,\n scaling_factor: Union[TensorType[\"num_cameras\":...], TensorType[\"num_cameras\":..., 1], float, int],\n round_hw=False,\n ) -> None:\n \"\"\"Rescale the output resolution of the cameras.\n\n Args:\n scaling_factor: Scaling factor to apply to the output resolution.\n round_hw: Whether to round the height and width to the nearest integer.\n \"\"\"\n if isinstance(scaling_factor, (float, int)):\n scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:\n scaling_factor = scaling_factor.unsqueeze(-1)\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):\n pass\n else:\n raise ValueError(\n f\"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}.\"\n )\n\n self.fx = self.fx * scaling_factor\n self.fy = self.fy * scaling_factor\n self.cx = self.cx * scaling_factor\n self.cy = self.cy * scaling_factor\n if not round_hw:\n self.height = (self.height * scaling_factor).to(torch.int64)\n self.width = (self.width * 
scaling_factor).to(torch.int64)\n else:\n self.height = torch.floor(self.height * scaling_factor + 0.5).to(torch.int64)\n self.width = torch.floor(self.width * scaling_factor + 0.5).to(torch.int64)\n\n def get_plotly(self, camera_group):\n\n # define local necssary coordinates for plotting\n num_cameras = self.camera_to_worlds.shape[0]\n _cam_center_c = np.array([[.0, .0, .0]]).repeat(num_cameras, axis=0)\n _cam_forward_c = np.array([[.0, .0, -1.0]]).repeat(num_cameras, axis=0)\n _cam_up_c = np.array([[.0, 1.0, .0]]).repeat(num_cameras, axis=0)\n _cam_right_c = np.array([[1.0, .0, .0]]).repeat(num_cameras, axis=0)\n\n _pyramid_width = self.width.cpu().numpy() / self.fx.cpu().numpy()\n _pyramid_height = self.height.cpu().numpy() / self.fy.cpu().numpy()\n\n _cam_pyramid_ur = np.concatenate([_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dr = np.concatenate([_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_ul = np.concatenate([-_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dl = np.concatenate([-_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n\n _local_coordinates = {\n 'center': _cam_center_c, \n 'forward': _cam_forward_c, \n 'up': _cam_up_c, \n 'right': _cam_right_c, \n 'pyramid_ur': _cam_pyramid_ur, \n 'pyramid_dr': _cam_pyramid_dr, \n 'pyramid_ul': _cam_pyramid_ul, \n 'pyramid_dl': _cam_pyramid_dl, \n }\n\n # transform it into world coordinates\n data = {}\n for k in _local_coordinates.keys():\n _local_coor_homo = np.concatenate([_local_coordinates[k].reshape(-1, 3) * plotly_camera_scale, np.ones((num_cameras, 1))], axis=-1) # num_cam, 4\n _cw = self.camera_to_worlds.cpu().numpy() # num_cam, 3, 4\n\n _homo = np.einsum('ijk,ik->ij', _cw, _local_coor_homo) # num_cam, 3\n data[k] = _homo[:, :3]\n\n plot_data = plot_camera_components(data, image_list=self.image_filenames, camera_group=camera_group)\n \n if isinstance(plot_data, list):\n return plot_data\n else:\n return [plot_data]" }, { "identifier": "CameraType", "path": "nerfstudio/cameras/cameras.py", "snippet": "class CameraType(Enum):\n \"\"\"Supported camera types.\"\"\"\n\n PERSPECTIVE = auto()\n FISHEYE = auto()\n EQUIRECTANGULAR = auto()" }, { "identifier": "DataParser", "path": "nerfstudio/data/dataparsers/base_dataparser.py", "snippet": "class DataParser:\n \"\"\"A dataset.\n\n Args:\n config: datasetparser config containing all information needed to instantiate dataset\n \"\"\"\n\n config: DataParserConfig\n\n def __init__(self, config: DataParserConfig):\n super().__init__()\n self.config = config\n\n @abstractmethod\n def _generate_dataparser_outputs(self, split: str = \"train\") -> DataparserOutputs:\n \"\"\"Abstract method that returns the dataparser outputs for the given split.\n\n Args:\n split: Which dataset split to generate (train/test).\n\n Returns:\n DataparserOutputs containing data for the specified dataset and split\n \"\"\"\n\n def get_dataparser_outputs(self, split: str = \"train\") -> DataparserOutputs:\n \"\"\"Returns the dataparser outputs for the given split.\n\n Args:\n split: Which dataset split to generate (train/test).\n\n Returns:\n DataparserOutputs containing data for the specified dataset and split\n \"\"\"\n dataparser_outputs = self._generate_dataparser_outputs(split)\n return dataparser_outputs" }, { "identifier": "DataParserConfig", "path": "nerfstudio/data/dataparsers/base_dataparser.py", "snippet": "class 
DataParserConfig(cfg.InstantiateConfig):\n \"\"\"Basic dataset config\"\"\"\n\n _target: Type = field(default_factory=lambda: DataParser)\n \"\"\"_target: target class to instantiate\"\"\"\n data: Path = Path()\n \"\"\"Directory specifying location of data.\"\"\"" }, { "identifier": "DataparserOutputs", "path": "nerfstudio/data/dataparsers/base_dataparser.py", "snippet": "class DataparserOutputs:\n \"\"\"Dataparser outputs for the which will be used by the DataManager\n for creating RayBundle and RayGT objects.\"\"\"\n\n image_filenames: List[Path]\n \"\"\"Filenames for the images.\"\"\"\n cameras: Cameras\n \"\"\"Camera object storing collection of camera information in dataset.\"\"\"\n alpha_color: Optional[TensorType[3]] = None\n \"\"\"Color of dataset background.\"\"\"\n scene_box: SceneBox = SceneBox()\n \"\"\"Scene box of dataset. Used to bound the scene or provide the scene scale depending on model.\"\"\"\n mask_filenames: Optional[List[Path]] = None\n \"\"\"Filenames for any masks that are required\"\"\"\n depths: Optional[torch.Tensor] = None\n \"\"\"Monocular depth.\"\"\"\n normals: Optional[torch.Tensor] = None\n \"\"\"Monocular normal.\"\"\"\n additional_inputs: Dict[str, Any] = to_immutable_dict({})\n \"\"\"Dictionary of additional dataset information (e.g. semantics/point clouds/masks).\n {input_name:\n ... {\"func\": function to process additional dataparser outputs,\n ... \"kwargs\": dictionary of data to pass into \"func\"}\n }\n \"\"\"\n metadata: Dict[str, Any] = to_immutable_dict({})\n \"\"\"Dictionary of any metadata that be required for the given experiment.\n Will be processed by the InputDataset to create any additional tensors that may be required.\n \"\"\"\n other_data: Dict[str, Any] = to_immutable_dict({})\n \"\"\"\n Dictionary of any other data that may be required for the given experiment but is not used by the InputDataset.\n Example: the point cloud of a scene. 
\n \"\"\"\n\n def as_dict(self) -> dict:\n \"\"\"Returns the dataclass as a dictionary.\"\"\"\n return vars(self)" }, { "identifier": "SceneBox", "path": "nerfstudio/data/scene_box.py", "snippet": "class SceneBox:\n \"\"\"Data to represent the scene box.\"\"\"\n\n aabb: TensorType[2, 3] = None\n \"\"\"aabb: axis-aligned bounding box.\n aabb[0] is the minimum (x,y,z) point.\n aabb[1] is the maximum (x,y,z) point.\"\"\"\n coarse_binary_gird: Optional[torch.Tensor] = None\n \"\"\"coarse binary grid computed from sparse colmap point cloud, currently only used in neuralrecon in the wild\"\"\"\n near: Optional[float] = 0.1\n \"\"\"near plane for each image\"\"\"\n far: Optional[float] = 6.0\n \"\"\"far plane for each image\"\"\"\n radius: Optional[float] = 1.0\n \"\"\"radius of sphere\"\"\"\n collider_type: Literal[\"box\", \"near_far\", \"sphere\"] = \"box\"\n \"\"\"collider type for each ray, default is box\"\"\"\n\n def get_diagonal_length(self):\n \"\"\"Returns the longest diagonal length.\"\"\"\n diff = self.aabb[1] - self.aabb[0]\n length = torch.sqrt((diff**2).sum() + 1e-20)\n return length\n\n def get_center(self):\n \"\"\"Returns the center of the box.\"\"\"\n diff = self.aabb[1] - self.aabb[0]\n return self.aabb[0] + diff / 2.0\n\n def get_centered_and_scaled_scene_box(self, scale_factor: Union[float, torch.Tensor] = 1.0):\n \"\"\"Returns a new box that has been shifted and rescaled to be centered\n about the origin.\n\n Args:\n scale_factor: How much to scale the camera origins by.\n \"\"\"\n return SceneBox(aabb=(self.aabb - self.get_center()) * scale_factor)\n\n @staticmethod\n def get_normalized_positions(positions: TensorType[..., 3], aabb: TensorType[2, 3]):\n \"\"\"Return normalized positions in range [0, 1] based on the aabb axis-aligned bounding box.\n\n Args:\n positions: the xyz positions\n aabb: the axis-aligned bounding box\n \"\"\"\n aabb_lengths = aabb[1] - aabb[0]\n normalized_positions = (positions - aabb[0]) / aabb_lengths\n return normalized_positions\n\n def to_json(self) -> Dict:\n \"\"\"Returns a json object from the Python object.\"\"\"\n return {\"type\": \"aabb\", \"min_point\": self.aabb[0].tolist(), \"max_point\": self.aabb[1].tolist()}\n\n @staticmethod\n def from_json(json_: Dict) -> \"SceneBox\":\n \"\"\"Returns the an instance of SceneBox from a json dictionary.\n\n Args:\n json_: the json dictionary containing scene box information\n \"\"\"\n assert json_[\"type\"] == \"aabb\"\n aabb = torch.tensor([json_[0], json_[1]])\n return SceneBox(aabb=aabb)\n\n @staticmethod\n def from_camera_poses(poses: TensorType[..., 3, 4], scale_factor: float) -> \"SceneBox\":\n \"\"\"Returns the instance of SceneBox that fully envelopes a set of poses\n\n Args:\n poses: tensor of camera pose matrices\n scale_factor: How much to scale the camera origins by.\n \"\"\"\n xyzs = poses[..., :3, -1]\n aabb = torch.stack([torch.min(xyzs, dim=0)[0], torch.max(xyzs, dim=0)[0]])\n return SceneBox(aabb=aabb * scale_factor)" }, { "identifier": "LLFFRawLoader", "path": "nerfstudio/data/dataparsers/raw_dataset_loader/llff_dataset_raw_loader.py", "snippet": "class LLFFRawLoader(RawLoader):\n def get_loaded_data(self) -> dict:\n num_skipped_image_filanems = 0\n\n # obtain all image files in the directory\n fnames_raw = [] # image names without considering resolution constraint.\n for frame in os.listdir(os.path.join(self.data_dir, \"images\")):\n if frame.endswith(\".JPG\") or frame.endswith(\".jpg\"):\n fnames_raw.append(os.path.join(\"images\", frame))\n\n CONSOLE.log(f\"Detected image 
files number: {len(fnames_raw)}\")\n\n # adjust resolution and assign correct directory\n fnames = []\n for frame in fnames_raw:\n file_path_scale = self._get_frame(PurePath(frame), self.data_dir)\n if file_path_scale.exists():\n fnames.append(file_path_scale)\n else:\n num_skipped_image_filanems += 1\n\n # obtain loaded image width and height\n img_width, img_height = self._get_size(fnames[0])\n CONSOLE.log(f\"Loaded image resolution: {img_height}x{img_width}\")\n\n if num_skipped_image_filanems > 0:\n CONSOLE.log(f\"Skip {num_skipped_image_filanems} images due to not correct resolution images found.\")\n CONSOLE.log(f\"Detected image files number after resolution check: {len(fnames)}\")\n\n # sort images by names\n inds = np.argsort(fnames)\n frames = [fnames[i] for i in inds]\n CONSOLE.log(f\"Detected total image files number: {len(frames)}\")\n if self.partition_index is not None:\n frames = frames[self.partition_index[0]:self.partition_index[1]]\n CONSOLE.log(f\"Load dataset partition of {self.partition_index[0]}-{self.partition_index[1]}\")\n \n # load poses data\n poses_bounds = np.load(os.path.join(self.data_dir, 'poses_bounds.npy'))\n assert poses_bounds.shape[0] == len(frames), \"loaded poses and image frames do not match {} and {}.\".format(poses_bounds.shape[0], len(frames))\n poses_bounds = poses_bounds[:, :15].reshape(poses_bounds.shape[0], 3, 5)\n poses_matrix = np.concatenate(\n [poses_bounds[:, :3, :4], np.tile(np.array([0, 0, 0, 1]), (poses_bounds.shape[0], 1, 1))], axis=1\n )\n assert (poses_bounds[:, 0, 4] == poses_bounds[0, 0, 4]).all(), \"image height is not consistent.\"\n assert (poses_bounds[:, 1, 4] == poses_bounds[0, 1, 4]).all(), \"image width is not consistent.\"\n\n # load point clouds (if available)\n point3d_xyz = None\n point3d_rgb = None\n if os.path.exists(os.path.join(self.data_dir, 'sparse', '0', 'points3D.bin')):\n point3d = read_points3d_binary(os.path.join(self.data_dir, 'sparse', '0', 'points3D.bin'))\n point3d_xyz = np.stack([item.xyz for item in point3d.values()], axis=0)\n point3d_rgb = np.stack([item.rgb for item in point3d.values()], axis=0)\n CONSOLE.log(f\"Loaded {point3d_xyz.shape[0]} point cloud points.\")\n\n # camera axis convention: from mipnerf360 to blender/nerf.\n # original: down, right, backwards\n # blender: right, up, backwards\n mipnerf360_to_blender = np.array([[0, 1, 0], [-1, 0, 0], [0, 0, 1]])\n poses_matrix[:, :3, :3] = poses_matrix[:, :3, :3] @ mipnerf360_to_blender.T\n # nothing to do with point cloud here, because coordinate transformation only affects rotation matrix. 
(no translation)\n\n # generate the returned data \n poses, image_filenames = [], []\n fx, fy, cx, cy, height, width, distort = [], [], [], [], [], [], []\n for i, frame in enumerate(frames):\n fx.append(float(poses_bounds[i, 2, 4]))\n fy.append(float(poses_bounds[i, 2, 4]))\n # principle point is assumed to be at the center.\n # careful!: cx corresponds to width while cy corresponds to height.\n cx.append(float(poses_bounds[i, 1, 4] / 2))\n cy.append(float(poses_bounds[i, 0, 4] / 2))\n height.append(int(poses_bounds[i, 0, 4]))\n width.append(int(poses_bounds[i, 1, 4]))\n\n distort.append(camera_utils.get_distortion_params(k1=0, k2=0, k3=0, k4=0, p1=0, p2=0))\n\n image_filenames.append(frame)\n poses.append(poses_matrix[i])\n \n self.ret_data = {\n 'poses': poses, \n 'image_filenames': image_filenames,\n 'fx': fx,\n 'fy': fy, \n 'cx': cx, \n 'cy': cy, \n 'height': height, \n 'width': width, \n 'distort': distort,\n 'point3d_xyz': point3d_xyz,\n 'point3d_rgb': point3d_rgb\n }\n return self.ret_data\n\n def _get_frame(self, filepath: PurePath, data_dir: PurePath, downsample_folder_prefix='images_') -> Path:\n \"\"\"Get the filename of the image file.\n downsample_folder_prefix can be used to point to auxillary image data, e.g. masks\n\n filepath: the base file name of the transformations.\n data_dir: the directory of the data that contains the transform file\n downsample_folder_prefix: prefix of the newly generated downsampled images\n \"\"\"\n if self.downscale_factor is None:\n test_img = Image.open(data_dir / filepath)\n w, h = test_img.size\n max_res = max(w, h)\n df = 0\n while True:\n if (max_res / 2 ** (df)) < MAX_AUTO_RESOLUTION:\n break\n if not (data_dir / f\"{downsample_folder_prefix}{2**(df+1)}\" / filepath.name).exists():\n break\n df += 1\n\n self.downscale_factor = 2**df\n CONSOLE.log(f\"Auto-detected downscale factor: {self.downscale_factor}\")\n\n if self.downscale_factor > 1:\n return data_dir / f\"{downsample_folder_prefix}{self.downscale_factor}\" / filepath.name\n else:\n return data_dir / filepath\n \n def _get_size(self, filepath: PurePath) -> tuple:\n img = Image.open(filepath)\n w, h = img.size\n return w, h\n\n def get_train_val_indices(self, eval_interval):\n all_indices = np.arange(len(self.ret_data['image_filenames'])).astype(np.int32)\n filter_func = lambda x: x % eval_interval == 0\n train_indices = all_indices[~filter_func(all_indices)]\n val_indices = all_indices[filter_func(all_indices)]\n return train_indices, val_indices" }, { "identifier": "KITTI360RawLoader", "path": "nerfstudio/data/dataparsers/raw_dataset_loader/kitti360_dataset_raw_loader.py", "snippet": "class KITTI360RawLoader(RawLoader):\n\n def __init__(self, data_dir: Path, downscale_factor: int = None, partition_index: tuple = None, **kwargs):\n super().__init__(data_dir, downscale_factor, partition_index, **kwargs)\n self.img_width = 1408\n self.img_height = 376\n\n def get_loaded_data(self) -> dict:\n self.downscale_factor = 1\n eval_type = self.other_args['eval_type']\n if eval_type == 'dev':\n self.ret_data, _ = self._load_split(self.data_dir, load_stereo_camera=True, midt=None)\n else:\n raise NotImplementedError(f\"Unknown eval_type {eval_type} for KITTI360RawLoader.\")\n\n return self.ret_data\n\n def _load_split(self, data_dir, load_stereo_camera: bool=True, midt=None) -> dict:\n dataset_dir = data_dir.parent.parent # e.g. ./data/KITTI360\n task_dir = data_dir.parent # e.g. ./data/KITTI360/data_2d_nvs_drop50\n scene = data_dir.name # e.g. 
train_01, test_01\n data_dir_dir = [f for f in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, f))]\n assert len(data_dir_dir) == 1, f\"KITTI360RawLoader only support a single sequence, but got {data_dir_dir}\"\n seq_id = data_dir_dir[0]\n img_h, img_w = self.img_height, self.img_width\n CONSOLE.log(f\"Loading kitti360 nvs task scene {scene} ...\")\n\n # obtain all training images (from camera_0 and camera_1)\n index_txt = os.path.join(task_dir, '{}.txt'.format(scene))\n with open(index_txt) as f:\n fnames_raw = f.read().splitlines()\n fnames = []\n for frame in fnames_raw:\n # for camera 0 \n fnames.append(Path(os.path.join(data_dir, frame)))\n \n # loading intrinsics \n K0, R_rect_0 = load_intrinsics(os.path.join(dataset_dir, 'calibration', 'perspective.txt'), 0)\n # K for camera 1 has shape of 3x4 and its[:3, 3] is non-zero (due to rectification for the translation on width direction, but it will be considered by T120)\n _, R_rect_1 = load_intrinsics(os.path.join(dataset_dir, 'calibration', 'perspective.txt'), 1)\n\n # loading extrinsics (poses)\n pose_file = os.path.join(dataset_dir, 'data_poses', seq_id, 'cam0_to_world.txt')\n frame_idxs, poses_all = load_poses(pose_file)\n cam0_poses = []\n for fname in fnames:\n img_idx = int(fname.name.split('.')[0])\n pose_idx = np.argwhere(frame_idxs == img_idx)[0][0]\n cam0_poses.append(poses_all[pose_idx])\n cam0_poses = np.stack(cam0_poses, axis=0)\n # translation based on middle frame\n # we avoid using invmid because it will cause world coordinate back to the perspective camera coordinate. \n # invmid = np.linalg.inv(cam0_poses[cam0_poses.shape[0] // 2])\n # cam0_poses = invmid @ cam0_poses\n if midt is None:\n midt = cam0_poses[cam0_poses.shape[0] // 2][:3, 3].copy()\n cam0_poses[:, :3, 3] -= midt\n else:\n cam0_poses[:, :3, 3] -= midt\n\n # loading camera -> GMU coordinates \n Tr = loadCalibrationCameraToPose(os.path.join(dataset_dir, 'calibration/calib_cam_to_pose.txt'))\n T0, T1 = Tr['image_00'], Tr['image_01']\n T0 = T0 @ np.linalg.inv(R_rect_0)\n T1 = T1 @ np.linalg.inv(R_rect_1)\n T021 = np.linalg.inv(T1) @ T0 \n T120 = np.linalg.inv(T021)\n cam1_poses = cam0_poses @ T120[None]\n\n # coordinate conversion\n # kitti360: right, down, forward\n # blender: right, up, backwards\n kitti360_to_blender = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])\n cam0_poses[:, :3, :3] = cam0_poses[:, :3, :3] @ kitti360_to_blender.T\n cam1_poses[:, :3, :3] = cam1_poses[:, :3, :3] @ kitti360_to_blender.T\n\n # generate the returned data \n poses, image_filenames = [], []\n fx, fy, cx, cy, height, width, distort = [], [], [], [], [], [], []\n\n for i, fname in enumerate(fnames):\n if load_stereo_camera:\n poses += [cam0_poses[i], cam1_poses[i]]\n image_filenames += [fname, path_cam0_to_cam1(fname)]\n fx += [K0[0, 0], K0[0, 0]]\n fy += [K0[1, 1], K0[1, 1]]\n cx += [K0[0, 2], K0[0, 2]]\n cy += [K0[1, 2], K0[1, 2]]\n height += [img_h, img_h]\n width += [img_w, img_w]\n distort += [camera_utils.get_distortion_params(k1=0, k2=0, k3=0, k4=0, p1=0, p2=0), camera_utils.get_distortion_params(k1=0, k2=0, k3=0, k4=0, p1=0, p2=0)]\n else:\n poses += [cam0_poses[i]]\n image_filenames += [fname]\n fx += [K0[0, 0]]\n fy += [K0[1, 1]]\n cx += [K0[0, 2]]\n cy += [K0[1, 2]]\n height += [img_h]\n width += [img_w]\n distort += [camera_utils.get_distortion_params(k1=0, k2=0, k3=0, k4=0, p1=0, p2=0)]\n\n ret_data = {\n 'poses': poses, \n 'image_filenames': image_filenames,\n 'fx': fx,\n 'fy': fy, \n 'cx': cx, \n 'cy': cy, \n 'height': height, \n 'width': width, \n 
'distort': distort,\n 'point3d_xyz': None,\n 'point3d_rgb': None, \n }\n\n return ret_data, midt\n \n def get_train_val_indices(self, eval_interval):\n all_indices = np.arange(len(self.ret_data['image_filenames'])).astype(np.int32)\n if self.other_args['eval_type'] == 'dev':\n filter_func = lambda x: (x % eval_interval == 0)\n train_indices = all_indices[~filter_func(all_indices)]\n val_indices = all_indices[filter_func(all_indices)]\n else:\n raise NotImplementedError(f\"Unknown eval_type {self.other_args['eval_type']} for KITTI360RawLoader.\")\n return train_indices, val_indices" }, { "identifier": "BungeeRawLoader", "path": "nerfstudio/data/dataparsers/raw_dataset_loader/bungee_dataset_raw_loader.py", "snippet": "class BungeeRawLoader(RawLoader):\n def get_loaded_data(self) -> dict:\n image_filenames, poses, scene_scaling_factor, scene_origin, scale_split = load_multiscale_data(self.data_dir, factor=self.downscale_factor)\n\n hwf = poses[0,:3,-1]\n poses = poses[:,:3,:4]\n H, W, focal = hwf\n H, W = int(H), int(W)\n hwf = [H, W, focal]\n\n # coordinate transformation\n # original: x-right, y-up, z-backward\n # same conventions as nerfstudio (original nerf paper), no need for further change. \n\n # generate exported files\n poses_list, filenames = [], []\n fx, fy, cx, cy, height, width, distort = [], [], [], [], [], [], []\n for i, frame in enumerate(image_filenames):\n fx.append(focal)\n fy.append(focal)\n cx.append(W/2)\n cy.append(H/2)\n \n height.append(H)\n width.append(W)\n distort.append(camera_utils.get_distortion_params(k1=0, k2=0, k3=0, k4=0, p1=0, p2=0))\n\n filenames.append(frame)\n poses_list.append(np.concatenate([poses[i], np.array([[0, 0, 0, 1]])], axis=0))\n\n self.ret_data = {\n 'poses': poses_list, \n 'image_filenames': filenames,\n 'fx': fx,\n 'fy': fy,\n 'cx': cx,\n 'cy': cy,\n 'height': height,\n 'width': width,\n 'distort': distort,\n 'point3d_xyz': None,\n 'point3d_rgb': None,\n }\n \n return self.ret_data\n\n def get_train_val_indices(self, eval_interval):\n assert eval_interval == 16, 'eval_interval is expected to be 16 in bungee dataset.'\n all_indices = np.arange(len(self.ret_data['image_filenames']))\n filter_func = lambda x: x % eval_interval == 0\n train_indices = all_indices[~filter_func(all_indices)]\n val_indices = all_indices[filter_func(all_indices)]\n return train_indices, val_indices" }, { "identifier": "NeRFStudioRawLoader", "path": "nerfstudio/data/dataparsers/raw_dataset_loader/nerf_dataset_raw_loader.py", "snippet": "class NeRFStudioRawLoader(RawLoader):\n\n def get_loaded_data(self) -> dict:\n meta = load_from_json(self.data_dir / 'transforms.json')\n\n num_skipped_image_filenames = 0\n\n fx_fixed = \"fl_x\" in meta\n fy_fixed = \"fl_y\" in meta\n cx_fixed = \"cx\" in meta\n cy_fixed = \"cy\" in meta\n height_fixed = \"h\" in meta\n width_fixed = \"w\" in meta\n distort_fixed = False\n for distort_key in [\"k1\", \"k2\", \"k3\", \"p1\", \"p2\"]:\n if distort_key in meta:\n distort_fixed = True\n break\n\n fnames = []\n for frame in meta[\"frames\"]:\n filepath = PurePath(frame[\"file_path\"])\n fname = self._get_frame(filepath, self.data_dir)\n fnames.append(fname)\n inds = np.argsort(fnames)\n frames = [meta[\"frames\"][ind] for ind in inds]\n \n if self.partition_index is not None:\n frames = frames[self.partition_index[0]:self.partition_index[1]]\n CONSOLE.log(f\"Load dataset partition of {self.partition_index[0]}-{self.partition_index[1]}\")\n else:\n CONSOLE.log(f\"Load entire dataset of size = {len(frames)}\")\n\n image_filenames = []\n 
poses = []\n fx = []\n fy = []\n cx = []\n cy = []\n height = []\n width = []\n distort = []\n\n for frame in frames:\n filepath = PurePath(frame[\"file_path\"])\n fname = self._get_frame(filepath,self.data_dir)\n if not fname.exists():\n num_skipped_image_filenames += 1\n continue\n\n transform_matrix = np.array(frame[\"transform_matrix\"])\n if (transform_matrix[:3, 3] > 100).any() or (transform_matrix[:3, 3] < -100).any():\n CONSOLE.log(f'detect abnormal camera center {transform_matrix[:3, 3]}, skip this image.')\n num_skipped_image_filenames += 1\n continue\n\n if not fx_fixed:\n fx.append(float(frame[\"fl_x\"]))\n else:\n fx.append(float(meta[\"fl_x\"]))\n\n if not fy_fixed:\n fy.append(float(frame[\"fl_y\"]))\n else:\n fy.append(float(meta[\"fl_y\"]))\n\n if not cx_fixed:\n cx.append(float(frame[\"cx\"]))\n else:\n cx.append(float(meta[\"cx\"]))\n\n if not cy_fixed:\n cy.append(float(frame[\"cy\"]))\n else:\n cy.append(float(meta[\"cy\"]))\n \n if not height_fixed:\n height.append(int(frame[\"h\"]))\n else:\n height.append(int(meta[\"h\"]))\n \n if not width_fixed:\n width.append(int(frame[\"w\"]))\n else:\n width.append(int(meta[\"w\"]))\n\n if not distort_fixed:\n raise NotImplementedError(\"No support for variable distortion parameters yet.\")\n else:\n distort.append(\n camera_utils.get_distortion_params(\n k1=float(meta[\"k1\"]) if \"k1\" in meta else 0.0,\n k2=float(meta[\"k2\"]) if \"k2\" in meta else 0.0,\n k3=float(meta[\"k3\"]) if \"k3\" in meta else 0.0,\n k4=float(meta[\"k4\"]) if \"k4\" in meta else 0.0,\n p1=float(meta[\"p1\"]) if \"p1\" in meta else 0.0,\n p2=float(meta[\"p2\"]) if \"p2\" in meta else 0.0,\n )\n )\n\n image_filenames.append(fname)\n poses.append(transform_matrix)\n\n # check\n if num_skipped_image_filenames >= 0:\n CONSOLE.log(f\"Skipping {num_skipped_image_filenames} files in dataset.\")\n assert (\n len(image_filenames) != 0\n ), \"\"\"\n No image files found. 
\n You should check the file_paths in the transforms.json file to make sure they are correct.\n \"\"\"\n\n # load point clouds (if available)\n point3d_xyz = None\n point3d_rgb = None\n if os.path.exists(os.path.join(self.data_dir, 'sparse', '0', 'points3D.bin')):\n point3d = read_points3d_binary(os.path.join(self.data_dir, 'sparse', '0', 'points3D.bin'))\n point3d_xyz = np.stack([item.xyz for item in point3d.values()], axis=0)\n point3d_rgb = np.stack([item.rgb for item in point3d.values()], axis=0)\n CONSOLE.log(f\"Loaded {point3d_xyz.shape[0]} point cloud points.\")\n \n if os.path.exists(os.path.join(self.data_dir, 'colmap', 'sparse', '0', 'points3D.bin')):\n point3d = read_points3d_binary(os.path.join(self.data_dir, 'colmap', 'sparse', '0', 'points3D.bin'))\n point3d_xyz = np.stack([item.xyz for item in point3d.values()], axis=0)\n point3d_rgb = np.stack([item.rgb for item in point3d.values()], axis=0)\n CONSOLE.log(f\"Loaded {point3d_xyz.shape[0]} point cloud points.\")\n\n if isinstance(point3d_xyz, np.ndarray):\n point3d_xyz = point3d_xyz[:, np.array([1, 0, 2])]\n point3d_xyz[..., 2] *= -1\n\n self.ret_data = {\n 'poses': poses, \n 'image_filenames': image_filenames,\n 'fx': fx,\n 'fy': fy, \n 'cx': cx, \n 'cy': cy, \n 'height': height, \n 'width': width, \n 'distort': distort,\n 'point3d_xyz': point3d_xyz,\n 'point3d_rgb': point3d_rgb\n }\n\n # import ipdb\n # ipdb.set_trace()\n\n return self.ret_data\n\n def _get_frame(self, filepath: PurePath, data_dir: PurePath, downsample_folder_prefix='images_') -> Path:\n \"\"\"Get the filename of the image file.\n downsample_folder_prefix can be used to point to auxillary image data, e.g. masks\n\n filepath: the base file name of the transformations.\n data_dir: the directory of the data that contains the transform file\n downsample_folder_prefix: prefix of the newly generated downsampled images\n \"\"\"\n if self.downscale_factor is None:\n test_img = Image.open(data_dir / filepath)\n w, h = test_img.size\n max_res = max(w, h)\n df = 0\n while True:\n if (max_res / 2 ** (df)) < MAX_AUTO_RESOLUTION:\n break\n if not (data_dir / f\"{downsample_folder_prefix}{2**(df+1)}\" / filepath.name).exists():\n break\n df += 1\n\n self.downscale_factor = 2**df\n CONSOLE.log(f\"Auto-detected downscale factor: {self.downscale_factor}\")\n\n if self.downscale_factor > 1:\n return data_dir / f\"{downsample_folder_prefix}{self.downscale_factor}\" / filepath.name\n else:\n return data_dir / filepath\n\n def get_train_val_indices(self, eval_interval):\n all_indices = np.arange(len(self.ret_data['image_filenames'])).astype(np.int32)\n filter_func = lambda x: x % eval_interval == 0\n train_indices = all_indices[~filter_func(all_indices)]\n val_indices = all_indices[filter_func(all_indices)]\n return train_indices, val_indices" } ]
from curses import meta from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Type from rich.console import Console from typing_extensions import Literal from nerfstudio.utils.plotly_utils_nelfpro import plot_point3d from nerfstudio.data.utils.probe_sample import FactorPoseGenerator from nerfstudio.cameras import camera_utils from nerfstudio.cameras.cameras import Cameras, CameraType from nerfstudio.data.dataparsers.base_dataparser import ( DataParser, DataParserConfig, DataparserOutputs, ) from nerfstudio.data.scene_box import SceneBox from nerfstudio.data.dataparsers.raw_dataset_loader.llff_dataset_raw_loader import LLFFRawLoader from nerfstudio.data.dataparsers.raw_dataset_loader.kitti360_dataset_raw_loader import KITTI360RawLoader from nerfstudio.data.dataparsers.raw_dataset_loader.bungee_dataset_raw_loader import BungeeRawLoader from nerfstudio.data.dataparsers.raw_dataset_loader.nerf_dataset_raw_loader import NeRFStudioRawLoader import numpy as np import torch
20,994
from __future__ import annotations CONSOLE = Console(width=120) @dataclass class NeLFProDataParserConfig(DataParserConfig): """Configuration for the SpherRiFDataParser.""" _target: Type = field(default_factory=lambda: NeLFProDataParser) # raw dataset loader config raw_loader: Literal["llff", "kitti360", "bungee", "nerfstudio"] = "llff" data: Path = Path("./data/please_fill_in_the_path_to_your_raw_dataset") eval_interval: int = 8 eval_type: Literal["dev"] = "dev" # camera pose config scale_factor: float = 1.0 downscale_factor: Optional[int] = None scenebox_scale: int = 1.0 orientation_method: Literal["none", "up", "pca"] = "up" center_poses: bool = True auto_scale_poses: bool = True # probe generation config data_num_core: int = 3 data_num_basis: int = 64 use_kmeans_core: bool = True use_fps_basis: bool = True factor_pos_noise_scale: float = 0.02 # point cloud config point_cloud_sample_num: int = -1 @dataclass class NeLFProDataParser(DataParser): """Dataset Parser for Raw Mipnerf360 dataset.""" config: NeLFProDataParserConfig downscale_factor: Optional[int] = None def _generate_dataparser_outputs(self, split="train"): data_dir = Path(self.config.data) if self.config.raw_loader == "llff": raw_loader = LLFFRawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None) elif self.config.raw_loader == "kitti360": raw_loader = KITTI360RawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None, eval_type=self.config.eval_type) elif self.config.raw_loader == 'bungee': raw_loader = BungeeRawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None) elif self.config.raw_loader == "nerfstudio":
from __future__ import annotations CONSOLE = Console(width=120) @dataclass class NeLFProDataParserConfig(DataParserConfig): """Configuration for the SpherRiFDataParser.""" _target: Type = field(default_factory=lambda: NeLFProDataParser) # raw dataset loader config raw_loader: Literal["llff", "kitti360", "bungee", "nerfstudio"] = "llff" data: Path = Path("./data/please_fill_in_the_path_to_your_raw_dataset") eval_interval: int = 8 eval_type: Literal["dev"] = "dev" # camera pose config scale_factor: float = 1.0 downscale_factor: Optional[int] = None scenebox_scale: int = 1.0 orientation_method: Literal["none", "up", "pca"] = "up" center_poses: bool = True auto_scale_poses: bool = True # probe generation config data_num_core: int = 3 data_num_basis: int = 64 use_kmeans_core: bool = True use_fps_basis: bool = True factor_pos_noise_scale: float = 0.02 # point cloud config point_cloud_sample_num: int = -1 @dataclass class NeLFProDataParser(DataParser): """Dataset Parser for Raw Mipnerf360 dataset.""" config: NeLFProDataParserConfig downscale_factor: Optional[int] = None def _generate_dataparser_outputs(self, split="train"): data_dir = Path(self.config.data) if self.config.raw_loader == "llff": raw_loader = LLFFRawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None) elif self.config.raw_loader == "kitti360": raw_loader = KITTI360RawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None, eval_type=self.config.eval_type) elif self.config.raw_loader == 'bungee': raw_loader = BungeeRawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None) elif self.config.raw_loader == "nerfstudio":
raw_loader = NeRFStudioRawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None)
12
2023-12-15 20:07:22+00:00
24k
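The record above pairs repository context snippets (each carrying an "identifier", "path", and "snippet") with an import block, a cropped code window, the held-out next line, and an index that appears to mark which context snippet grounds that line. The sketch below is an unofficial, illustrative example of how such a record might be assembled into a next-line completion prompt and scored; the prompt layout, the helper names (build_prompt, exact_match), and the exact-match metric are assumptions for illustration, not part of the dataset or its loader.

# A minimal sketch (not the dataset's official tooling) of consuming one record
# shaped like the rows in this dump. Field names mirror the record layout above;
# everything else (prompt format, metric) is an assumption.

from typing import Any, Dict, List


def build_prompt(record: Dict[str, Any], max_context_snippets: int = 4) -> str:
    """Concatenate a few retrieved context snippets, the import block, and the
    cropped code into a single prompt string for next-line completion."""
    context: List[Dict[str, str]] = record["context"][:max_context_snippets]
    # Each context entry carries an identifier, its source path, and a code snippet.
    context_text = "\n\n".join(
        f"# {c['path']} :: {c['identifier']}\n{c['snippet']}" for c in context
    )
    return f"{context_text}\n\n{record['import_statement']}\n{record['cropped_code']}"


def exact_match(prediction: str, record: Dict[str, Any]) -> bool:
    """Whitespace-insensitive exact match against the gold next_line."""
    return prediction.strip() == record["next_line"].strip()


if __name__ == "__main__":
    # Toy record shaped like the rows in this dump (values shortened for brevity).
    record = {
        "context": [
            {
                "identifier": "NeRFStudioRawLoader",
                "path": "nerfstudio/data/dataparsers/raw_dataset_loader/nerf_dataset_raw_loader.py",
                "snippet": "class NeRFStudioRawLoader(RawLoader): ...",
            },
        ],
        "import_statement": "import numpy as np\nimport torch",
        "cropped_code": "elif self.config.raw_loader == \"nerfstudio\":",
        "next_line": "raw_loader = NeRFStudioRawLoader(data_dir, downscale_factor=self.config.downscale_factor, partition_index=None)",
        # Assumed to index the context snippet that the gold next line relies on.
        "gold_snippet_index": 0,
    }

    prompt = build_prompt(record)
    # A real evaluation would query a code model here; we fake a perfect prediction.
    fake_prediction = record["next_line"]
    print("prompt characters:", len(prompt))
    print("exact match:", exact_match(fake_prediction, record))
    print("gold snippet:", record["context"][record["gold_snippet_index"]]["identifier"])

In this sketch the record is built in memory rather than parsed from this dump, which keeps the example self-contained and avoids guessing at the file's serialization details.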
amazon-science/c2f-seg
data/dataloader_transformer.py
[ { "identifier": "FishBowl", "path": "data/dataloader_Fishbowl.py", "snippet": "class FishBowl(object):\n def __init__(self, config, mode, subtest=None):\n self.datatype = mode\n data_dir = config.root_path\n\n self.img_path = os.path.join(data_dir, self.datatype+\"_data\", self.datatype+\"_frames\")\n self.mode = mode\n self.dtype = torch.float32\n self.test_set = subtest\n \n self.data_summary = pickle.load(open(os.path.join(data_dir, self.datatype+\"_data\", self.datatype+\"_data.pkl\"), \"rb\"))\n self.obj_lists = list(self.data_summary.keys())\n self.device = \"cpu\"\n\n self.seq_len = 32 if self.mode == \"test\" else config.train_seq_len\n\n self.cur_vid = None\n self.video_frames = None\n self.patch_h = config.patch_H\n self.patch_w = config.patch_W\n self.enlarge_coef = config.enlarge_coef\n\n def decode2binarymask(self, masks):\n mask = mask_utils.decode(masks)\n binary_masks = mask.astype('bool') # (Image_W,Image_H,128)\n binary_masks = binary_masks.transpose(2,0,1) #(128, Image_W, Image_H)\n return binary_masks\n\n def __len__(self):\n return len(self.obj_lists)\n\n def __getitem__(self, idx):\n v_id, obj_id = self.obj_lists[idx].split(\"_\")\n if v_id != self.cur_vid:\n self.cur_vid = v_id\n fm_crop = []\n fm_no_crop = []\n vm_crop = []\n vm_no_crop = []\n img_crop = []\n \n obj_position = []\n\n counts = []\n loss_mask_weight = []\n\n # for evaluation \n video_ids = []\n object_ids = []\n frame_ids = []\n\n obj_dict = self.data_summary[self.obj_lists[idx]]\n timesteps = list(obj_dict.keys())\n assert np.all(np.diff(sorted(timesteps))==1)\n start_t, end_t = min(timesteps), max(timesteps)\n # print(start_t, end_t)\n if self.mode != \"test\" and end_t - start_t > self.seq_len - 1:\n start_t = np.random.randint(start_t, end_t-(self.seq_len-2))\n end_t = start_t + self.seq_len - 1\n\n if self.mode == \"test\":\n if start_t + self.seq_len-1<=end_t:\n end_t = start_t + self.seq_len-1\n\n for t_step in range(start_t, end_t):\n image_path = os.path.join(self.img_path, v_id, str(t_step).zfill(5)+'.png')\n img = cv2.imread(image_path)[:,:,::-1]\n # get visible mask and full mask\n vm = self.decode2binarymask(obj_dict[t_step][\"VM\"])[0]\n fm = self.decode2binarymask(obj_dict[t_step][\"FM\"])[0] # 320, 480\n vx_min, vx_max, vy_min, vy_max = obj_dict[t_step][\"VM_bx\"]\n x_center = (vx_min + vx_max) // 2\n y_center = (vy_min + vy_max) // 2\n x_len = int((vx_max - vx_min) * self.enlarge_coef)\n y_len = int((vy_max - vy_min) * self.enlarge_coef)\n vx_min = max(0, x_center - x_len // 2)\n vx_max = min(320, x_center + x_len // 2)\n vy_min = max(0, y_center - y_len // 2)\n vy_max = min(480, y_center + y_len // 2)\n\n obj_position.append([vx_min, vx_max, vy_min, vy_max])\n vm_crop.append(vm[vx_min:vx_max+1, vy_min:vy_max+1])\n fm_crop.append(fm[vx_min:vx_max+1, vy_min:vy_max+1])\n img_crop.append(img[vx_min:vx_max+1, vy_min:vy_max+1])\n\n vm_no_crop.append(vm)\n fm_no_crop.append(fm)\n # get loss mask\n loss_mask_weight.append(self.decode2binarymask(obj_dict[t_step][\"loss_mask_weight\"])[0])\n\n # for evaluation\n video_ids.append(int(v_id))\n object_ids.append(int(obj_id))\n frame_ids.append(t_step)\n counts.append(1)\n \n if True:\n num_pad = self.seq_len - (end_t - start_t)\n for _ in range(num_pad):\n obj_position.append(copy.deepcopy(obj_position[-1]))\n\n fm_crop.append(copy.deepcopy(fm_crop[-1]))\n fm_no_crop.append(copy.deepcopy(fm_no_crop[-1]))\n vm_crop.append(copy.deepcopy(vm_crop[-1]))\n vm_no_crop.append(copy.deepcopy(vm_no_crop[-1]))\n 
img_crop.append(copy.deepcopy(img_crop[-1]))\n\n loss_mask_weight.append(copy.deepcopy(loss_mask_weight[-1]))\n \n video_ids.append(video_ids[-1])\n object_ids.append(object_ids[-1])\n frame_ids.append(frame_ids[-1] + 1)\n counts.append(0)\n \n vm_crop, vm_crop_gt, fm_crop, img_crop, vm_pad, vm_scale = self.crop_and_rescale(vm_crop, fm_crop, img_crop)\n\n vm_crop = np.stack(vm_crop, axis=0) # Seq_len * h * w\n vm_crop_gt = np.stack(vm_crop_gt, axis=0) # Seq_len * h * w\n vm_no_crop = np.stack(vm_no_crop, axis=0) # Seq_len * H * W\n fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_no_crop = np.stack(fm_no_crop, axis=0) # Seq_len * H * W\n\n vm_crop = torch.from_numpy(np.array(vm_crop)).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(np.array(vm_crop_gt)).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n fm_crop = torch.from_numpy(np.array(fm_crop)).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n vm_pad = torch.from_numpy(np.array(vm_pad)).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(np.array(vm_scale)).to(self.dtype).to(self.device)\n\n video_ids = torch.from_numpy(np.array(video_ids)).to(self.dtype).to(self.device)\n object_ids = torch.from_numpy(np.array(object_ids)).to(self.dtype).to(self.device)\n frame_ids = torch.from_numpy(np.array(frame_ids)).to(self.dtype).to(self.device)\n counts = torch.from_numpy(np.array(counts)).to(self.dtype).to(self.device)\n loss_mask_weight = torch.from_numpy(np.array(loss_mask_weight)).to(self.dtype).to(self.device) \n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n\n obj_data = {\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"vm_no_crop\": vm_no_crop,\n \"fm_crop\": fm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"img_crop\": img_crop,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"video_ids\": video_ids,\n \"object_ids\": object_ids,\n \"frame_ids\": frame_ids,\n \"counts\": counts,\n \"loss_mask\": loss_mask_weight, \n \"obj_position\": obj_position,\n }\n\n return obj_data\n\n def crop_and_rescale(self, vm_crop, fm_crop_vm=None, img_crop=None):\n h, w = np.array([m.shape for m in vm_crop]).max(axis=0)\n vm_pad = []\n vm_scale = []\n vm_crop_gt = []\n\n for i, m in enumerate(vm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n vm_pad.append(np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)]))\n vm_scale.append(np.array([self.patch_h/h, self.patch_w/w]))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n if self.mode==\"train\":\n vm_crop[i] = self.data_augmentation(m)\n vm_crop_gt.append(m)\n else:\n vm_crop[i] = m\n vm_crop_gt.append(m)\n\n for i, m in enumerate(fm_crop_vm):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n fm_crop_vm[i] = m\n\n for i, img_ in enumerate(img_crop):\n img_ = transform.rescale(img_, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n 
img_crop[i] = img_\n\n vm_pad = np.stack(vm_pad)\n vm_scale = np.stack(vm_scale)\n return vm_crop, vm_crop_gt, fm_crop_vm, img_crop, vm_pad, vm_scale\n \n def getImg(self, v_id):\n imgs = []\n imgs_list = os.listdir(os.path.join(self.img_path, v_id))\n imgs_list.sort()\n for sub_path in imgs_list:\n img_path = os.path.join(self.img_path, v_id, sub_path)\n img_tmp = plt.imread(img_path)\n imgs.append(img_tmp)\n assert len(imgs) == 128\n return imgs\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n for item in sample_loader:\n yield item\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n return res\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.1 and rdv < 0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.5:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.5 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)" }, { "identifier": "MOViD_A", "path": "data/dataloader_MOViD_A.py", "snippet": "class MOViD_A(object):\n def __init__(self, config, mode):\n super(MOViD_A, self).__init__()\n self.mode = mode\n self.dtype = torch.float32\n self.device = \"cpu\"\n root_path = config.root_path\n self.data_dir = os.path.join(root_path, mode)\n \n self.instance_list = np.genfromtxt(\n os.path.join(root_path, \"{}_instance.txt\".format(mode)),\n dtype=np.str,\n encoding='utf-8'\n )\n\n self.train_seq_len = 24\n self.cur_vid = None\n self.patch_h = config.patch_H\n self.patch_w = config.patch_W\n self.enlarge_coef = config.enlarge_coef\n\n def __len__(self):\n return len(self.instance_list)\n\n def __getitem__(self, idx, specified_V_O_id=None):\n # whether choose a specific instance to load\n if specified_V_O_id is None:\n v_id, obj_id, value = self.instance_list[idx].split(\"_\")\n else:\n v_id, obj_id, value = specified_V_O_id.split(\"_\")\n v_id, obj_id, value = int(v_id), int(obj_id), int(value)\n if v_id != self.cur_vid:\n self.cur_vid = v_id\n self.video_path = os.path.join(self.data_dir, str(v_id))\n metadata = self.read_json(os.path.join(self.video_path, 'metadata.json'))\n\n self.num_frames = metadata[\"metadata\"][\"num_frames\"]\n self.height = metadata['metadata']['height']\n self.width = metadata['metadata']['width']\n self.instances = [self.format_instance_information(obj) for obj in metadata[\"instances\"]]\n\n vis_mask_paths = [os.path.join(self.video_path, \"segmentation_full_{}.png\".format(str(f).zfill(5))) for f in range(self.num_frames)]\n vis_mask = [np.array(Image.open(frame_path)) for frame_path in vis_mask_paths] #[t,h,w]\n\n full_mask_paths = [os.path.join(self.video_path, \"segmentation_{}_{}.png\".format(obj_id, str(f).zfill(5))) for f in range(self.num_frames)]\n full_mask = [np.array(Image.open(frame_path)) for frame_path in full_mask_paths] #[t,h,w]\n \n rgb_img_path = [os.path.join(self.video_path, 
\"rgba_full_{}.png\".format(str(f).zfill(5))) for f in range(self.num_frames)]\n rgb_img = [np.array(Image.open(frame_path))[...,:3] for frame_path in rgb_img_path]\n \n counts = []\n obj_position = []\n\n vm_crop = []\n vm_no_crop = []\n fm_crop = []\n fm_no_crop = []\n loss_mask_weight = []\n img_crop = []\n # for evaluation \n video_ids = []\n object_ids = []\n frame_ids = []\n\n timesteps = self.instances[obj_id]['bbox_frames']\n start_t, end_t = 0, 23\n if self.mode != \"test\" and end_t - start_t > self.train_seq_len - 1:\n start_t = np.random.randint(start_t, end_t-(self.train_seq_len-2))\n end_t = start_t + self.train_seq_len - 1\n\n for t_step in range(start_t, end_t+1):\n Image_H, Image_W = self.height, self.width\n # some objects will move out the field of view in some frames\n if t_step in timesteps:\n index = self.instances[obj_id][\"bbox_frames\"].index(t_step)\n xmin, ymin, xmax, ymax = self.instances[obj_id][\"bboxes\"][index]\n vx_min, vy_min, vx_max, vy_max = int(Image_H*xmin), int(Image_W*ymin), int(Image_H*xmax), int(Image_W*ymax)\n counts.append(1)\n else:\n bboxs = mask_find_bboxs(full_mask[t_step].astype(np.uint8))\n \n if bboxs.size==0:\n vx_min, vy_min, vx_max, vy_max = 0, 0, 256, 256\n else:\n b = bboxs[-1][:4]\n vx_min, vy_min, vx_max, vy_max = b[1], b[0], b[1]+b[3], b[0]+b[2]\n counts.append(0)\n\n # enlarge the bbox\n x_center = (vx_min + vx_max) // 2\n y_center = (vy_min + vy_max) // 2\n x_len = int((vx_max - vx_min) * self.enlarge_coef)\n y_len = int((vy_max - vy_min) * self.enlarge_coef)\n vx_min = max(0, x_center - x_len // 2)\n vx_max = min(Image_H, x_center + x_len // 2)\n vy_min = max(0, y_center - y_len // 2)\n vy_max = min(Image_W, y_center + y_len // 2)\n\n obj_position.append([vx_min, vx_max, vy_min, vy_max])\n\n # get mask\n vm = vis_mask[t_step]\n vm_crop.append(vm[vx_min:vx_max+1, vy_min:vy_max+1]==value)\n vm_no_crop.append(vm==value)\n\n fm = full_mask[t_step]\n fm_crop.append(fm[vx_min:vx_max+1, vy_min:vy_max+1]==value)\n fm_no_crop.append(fm==value)\n \n # get image\n image = rgb_img[t_step]\n img_crop.append(image[vx_min:vx_max+1, vy_min:vy_max+1])\n\n # get loss mask\n fore_ground = vm == 0\n obj_ground = vm==value\n loss_mask = np.logical_or(fore_ground, obj_ground)\n\n loss_mask_weight.append(loss_mask)\n\n # for evaluation\n video_ids.append(v_id)\n object_ids.append(obj_id)\n frame_ids.append(t_step)\n\n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n \n vm_crop, fm_crop, vm_pad, vm_scale, vm_crop_gt, img_crop = self.crop_and_rescale(vm_crop, fm_crop, img_crop)\n\n vm_crop = np.stack(vm_crop, axis=0) # Seq_len * h * w\n vm_no_crop = np.stack(vm_no_crop, axis=0) # Seq_len * H * W\n # fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_no_crop = np.stack(fm_no_crop, axis=0) # Seq_len * H * W\n img_crop = np.stack(img_crop, axis=0) # Sqe_len * H * W\n\n vm_crop = torch.from_numpy(np.array(vm_crop)).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n fm_crop = torch.from_numpy(np.array(fm_crop)).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n vm_pad = torch.from_numpy(np.array(vm_pad)).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(np.array(vm_scale)).to(self.dtype).to(self.device)\n\n video_ids = 
torch.from_numpy(np.array(video_ids)).to(self.dtype).to(self.device)\n object_ids = torch.from_numpy(np.array(object_ids)).to(self.dtype).to(self.device)\n frame_ids = torch.from_numpy(np.array(frame_ids)).to(self.dtype).to(self.device)\n counts = torch.from_numpy(np.array(counts)).to(self.dtype).to(self.device)\n loss_mask_weight = torch.from_numpy(np.array(loss_mask_weight)).to(self.dtype).to(self.device) \n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n\n obj_data = {\n \"vm_crop\": vm_crop,\n \"vm_no_crop\": vm_no_crop,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n\n \"img_crop\": img_crop,\n \n \"fm_crop\": fm_crop,\n \"fm_no_crop\": fm_no_crop,\n\n \"obj_position\": obj_position, \n \"loss_mask\": loss_mask_weight, \n \"counts\": counts,\n \"video_ids\": video_ids,\n \"object_ids\": object_ids,\n \"frame_ids\": frame_ids,\n }\n\n return obj_data\n\n def crop_and_rescale(self, vm_crop, fm_crop=None,img_crop=None):\n h, w = np.array([m.shape for m in vm_crop]).max(axis=0)\n vm_pad = []\n vm_crop_gt = []\n vm_scale = []\n for i, img in enumerate(img_crop):\n img = transform.rescale(img, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img = np.pad(img, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop[i] = img\n\n for i, m in enumerate(vm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n if self.mode==\"train\":\n vm_crop[i] = self.data_augmentation(m)\n else:\n vm_crop[i] = m\n vm_crop_gt.append(m)\n vm_pad.append(np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)]))\n vm_scale.append(np.array([self.patch_h/h, self.patch_w/w]))\n\n for i, m in enumerate(fm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n fm_crop[i] = m\n\n vm_pad = np.stack(vm_pad)\n vm_scale = np.stack(vm_scale)\n return vm_crop, fm_crop, vm_pad, vm_scale, vm_crop_gt,img_crop\n \n def read_json(self,dir_):\n with open(dir_) as f:\n data = json.load(f)\n return data\n\n def format_instance_information(self, obj):\n return {\n \"bboxes\": obj[\"bboxes\"],\n \"bbox_frames\": obj[\"bbox_frames\"],\n }\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n return res\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.1 and rdv < 0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.5:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.5 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 
1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)" }, { "identifier": "Kins_Fusion_dataset", "path": "data/dataloader_KINS.py", "snippet": "class Kins_Fusion_dataset(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(Kins_Fusion_dataset, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Fusion dataset\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n self.img_root_path = os.path.join(self.root_path, \"{}ing\".format(mode),\"image_2\")\n \n # Load the GT of AISFormer\n if mode==\"train\":\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_train.json\"))\n else:\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_val_upate.json\"))\n annotations = aisformer_gt[\"annotations\"]\n images = aisformer_gt[\"images\"]\n self.images, self.annotations = self.make_json_dict(images, annotations)\n \n # Load the GT of vanilla KINS\n self.base_img_path = os.path.join(self.root_path, \"{}ing\".format(mode), \"image_2\")\n self.base_ann_path= os.path.join(self.root_path, \"update_{}_2020.json\".format(mode))\n annotations = cvb.load(self.base_ann_path)\n imgs_info = annotations['images']\n anns_info = annotations[\"annotations\"]\n self.imgs_dict, self.anns_dict = self.make_json_dict(imgs_info, anns_info)\n\n # dataloader setting\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def load_item(self, index):\n # load aisformer predicted visible masks\n if \"aisformer\" in self.label_info[index]:\n dataset_name, image_id, anno_id = self.label_info[index].split(\",\")\n image_id, anno_id = int(image_id), int(anno_id)\n # add image information\n img_name = self.images[image_id]\n img_path = os.path.join(self.img_root_path, img_name)\n # img_path = os.path.join(self.img_root_path, str(image_id).zfill(6)+ \".png\")\n img = np.array(Image.open(img_path))\n instances = self.data_info['{}_{}'.format(dataset_name, image_id)][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, width = segmentation[\"size\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n rles = mask_utils.frPyObjects(instances[\"gt_full_mask\"], height, width)\n fm_no_crop = mask_utils.decode(mask_utils.merge(rles)).astype(bool)\n fm_no_crop = fm_no_crop[..., np.newaxis]\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n \n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, 
y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n \n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n \n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n vm_no_crop_gt = torch.from_numpy(np.array(vm_no_crop_gt)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": 
vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n }\n return meta\n else:\n img_id, anno_id, category_id = self.label_info[index].split(\"_\")\n img_id, anno_id, category_id = int(img_id), int(anno_id), int(category_id)\n\n img_name = self.imgs_dict[img_id]\n img_path = os.path.join(self.base_img_path, img_name)\n \n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n \n ann = self.anns_dict[img_id][anno_id]\n fm_no_crop = self.polys_to_mask(ann[\"a_segm\"], height, width)\n vm_no_crop = self.polys_to_mask(ann[\"i_segm\"], height, width)\n if np.sum(vm_no_crop)==0:\n counts = np.array([0])\n else:\n counts = np.array([1])\n y_min, x_min, w, h = ann[\"i_bbox\"]\n\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n \n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1].astype(bool)\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n loss_mask = fm_no_crop-vm_no_crop\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n # vm_crop here is the GT\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n img_id = 
torch.from_numpy(np.array(img_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # category_id = torch.from_numpy(np.array(category_id)).to(self.dtype).to(self.device)\n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n return meta\n\n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.2 and rdv <0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.55:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.55 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask" }, { "identifier": "KINS_Aisformer_VRSP_Intersection", "path": "data/dataloader_KINS.py", "snippet": "class KINS_Aisformer_VRSP_Intersection(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(KINS_Aisformer_VRSP_Intersection, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Intersection dataset\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"kins_intersection.pkl\"), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"kins_intersection_list.txt\"), dtype=np.str, encoding='utf-8')\n if mode==\"train\":\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_train.json\"))\n else:\n aisformer_gt = 
cvb.load(os.path.join(self.root_path, \"instances_val_upate.json\"))\n annotations = aisformer_gt[\"annotations\"]\n images = aisformer_gt[\"images\"]\n self.images, self.annotations = self.make_json_dict(images, annotations)\n self.img_root_path = os.path.join(self.root_path, \"{}ing\".format(mode), \"image_2\")\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def mask_find_bboxs(self, mask):\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)\n stats = stats[stats[:,4].argsort()]\n return stats\n \n def generate_heatmap(self, mask, kernel, sigma):\n heatmap = cv2.GaussianBlur(mask, kernel, sigma)\n am = np.amax(heatmap)\n heatmap /= am / 1\n return heatmap\n \n def load_item(self, index):\n image_id, anno_id = self.label_info[index].split(\"_\")\n image_id, anno_id = int(image_id), int(anno_id)\n instances = self.data_info[image_id][anno_id]\n\n segmentation = instances[\"pred_visible_mask\"]\n height, width = segmentation[\"size\"]\n # add image information\n img_name = self.images[image_id]\n img_path = os.path.join(self.img_root_path, img_name)\n # img_path = os.path.join(self.img_root_path, str(image_id).zfill(6)+ \".png\")\n img = Image.open(img_path)\n img = img.resize((width,height), Image.ANTIALIAS)\n img = np.array(img)\n \n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n # fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n rles = mask_utils.frPyObjects(instances[\"gt_full_mask\"], height, width)\n fm_no_crop = mask_utils.decode(mask_utils.merge(rles)).astype(bool)\n \n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n\n x_center_crop = x_center - x_min\n y_center_crop = y_center - y_min\n \n fm_no_crop = fm_no_crop[..., np.newaxis]\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n \n center_crop = np.zeros_like(vm_crop[0])\n x_center_crop = int(x_center_crop*self.patch_h/h)\n y_center_crop = int(y_center_crop*self.patch_w/w)\n center_crop[x_center_crop: x_center_crop+1, y_center_crop: y_center_crop+1]=1\n center_crop = self.generate_heatmap(center_crop.astype(np.float), (35, 35), 9)\n center_crop = center_crop[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, 
max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n refine_loss_mask = 1 - (vm_crop_gt==vm_crop).astype(bool)\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n # import pdb;pdb.set_trace()\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop_gt = torch.from_numpy(vm_no_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n refine_loss_mask = torch.from_numpy(np.array(refine_loss_mask)).to(self.dtype).to(self.device)\n center_crop = torch.from_numpy(np.array(center_crop)).to(self.dtype).to(self.device)\n \n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n # elif self.mode==\"test\":\n # meta = {\n # # \"vm_no_crop\": vm_no_crop,\n # \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # # \"vm_no_crop_gt\": vm_no_crop_gt,\n # # \"refine_loss_mask\": refine_loss_mask,\n # # \"fm_no_crop\": fm_no_crop,\n # \"fm_crop\": fm_crop,\n # \"img_crop\": img_crop,\n # # \"loss_mask\": loss_mask,\n # # \"obj_position\": obj_position,\n # # \"vm_pad\": vm_pad,\n # # \"vm_scale\": vm_scale,\n # # \"counts\":counts,\n # # \"img_id\": image_id,\n # # \"anno_id\": 
anno_id,\n # # # for vq\n # # # \"mask_crop\": fm_crop\n # # # \"img\":img,\n # }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n \"img\":img,\n }\n return meta\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask\n \n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict" }, { "identifier": "COCOA_Fusion_dataset", "path": "data/dataloader_COCOA.py", "snippet": "class COCOA_Fusion_dataset(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(COCOA_Fusion_dataset, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Fusion dataset \n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n \n if mode==\"train\":\n train_label = cvb.load(os.path.join(self.root_path, \"COCO_amodal_train2014_with_classes.json\"))\n self.anns_dict = train_label[\"annotations\"]\n self.img_root_path = os.path.join(self.root_path, \"train2014\")\n elif mode==\"test\":\n val_label = cvb.load(os.path.join(self.root_path, \"COCO_amodal_val2014_with_classes.json\"))\n self.anns_dict = val_label[\"annotations\"]\n self.img_root_path = os.path.join(self.root_path, \"val2014\")\n \n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def load_item(self, index):\n # predicted vm\n if len(self.label_info[index].split(\",\"))==3:\n dataset_name, image_id, anno_id = self.label_info[index].split(\",\")\n image_id, anno_id = int(image_id), int(anno_id)\n if self.mode==\"train\":\n img_path = os.path.join(self.img_root_path, \"COCO_{}2014_{}.jpg\".format(self.mode, str(image_id).zfill(12)))\n elif self.mode==\"test\":\n img_path = os.path.join(self.img_root_path, \"COCO_val2014_{}.jpg\".format(str(image_id).zfill(12)))\n img = np.array(Image.open(img_path))\n if len(img.shape)==2:\n img = np.repeat(img[:, :, 
np.newaxis], 3, axis=2)\n instances = self.data_info[\"{}_{}\".format(dataset_name, image_id)][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, weight = segmentation[\"size\"]\n # occlude_rate = instances[\"occlude_rate\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(weight, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n # import pdb;pdb.set_trace()\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n # if self.mode==\"test\":\n # loss_mask = mask_utils.decode([instances[\"loss_mask\"]]).astype(bool)[...,0]\n # else:\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = 
torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # occlude_rate = torch.from_numpy(np.array(occlude_rate)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n # \"vm_crop\": vm_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop_gt,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"img_crop\": img_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # \"occlude_rate\":occlude_rate\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img,\n }\n return meta\n # gt vm\n elif len(self.label_info[index].split(\",\"))==2:\n anno_id, img_path = self.label_info[index].split(\",\")\n anno_id = int(anno_id)\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n\n ann = self.anns_dict[anno_id]\n img_id = ann[\"image_id\"]\n # category_id = ann[\"category_id\"]\n\n full_mask = ann[\"segmentation\"]\n fm_no_crop = mask_utils.decode(full_mask)[...,np.newaxis]\n\n visible_mask = ann[\"visible_mask\"]\n vm_no_crop = mask_utils.decode(visible_mask)[...,np.newaxis]\n\n if np.sum(vm_no_crop)==0:\n counts = np.array([0])\n else:\n counts = np.array([1])\n y_min, x_min, w, h = ann[\"bbox\"]\n y_max, x_max = y_min + w, x_min + h\n y_min, x_min, y_max, x_max = int(y_min), int(x_min), int(y_max), int(x_max) \n\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n \n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, 
max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n\n # full_pad = ((0, max(375-height, 0)), (0, max(1242-width, 0)))\n # vm_no_crop = np.pad(vm_no_crop, full_pad)[:375, :1242]\n # fm_no_crop = np.pad(fm_no_crop, full_pad)[:375, :1242]\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n loss_mask = fm_no_crop-vm_no_crop\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n \n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(img_crop).to(self.dtype).to(self.device)\n img = torch.from_numpy(img).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n \n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n img_id = torch.from_numpy(np.array(img_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # category_id = torch.from_numpy(np.array(category_id)).to(self.dtype).to(self.device)\n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\": counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # \"category_id\": category_id,\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # \"category_id\": category_id,\n # for vq\n # \"mask_crop\": fm_crop\n \"img_no_crop\": img,\n }\n return meta\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n 
dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask\n\n # def data_augmentation(self, mask):\n # return mask\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.2 and rdv <0.9:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.6:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.6 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)\n \n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict" }, { "identifier": "COCOA_VRSP", "path": "data/dataloader_COCOA.py", "snippet": "class COCOA_VRSP(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(COCOA_VRSP, self).__init__()\n self.config = config\n self.mode = mode\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n \n if self.mode==\"train\":\n self.img_root_path = os.path.join(self.root_path, \"train2014\")\n elif self.mode==\"test\":\n self.img_root_path = os.path.join(self.root_path, \"val2014\")\n\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def generate_heatmap(self, mask, kernel, sigma):\n heatmap = cv2.GaussianBlur(mask, kernel, sigma)\n am = np.amax(heatmap)\n heatmap /= am / 1\n return heatmap\n \n def load_item(self, index):\n image_id, anno_id = self.label_info[index].split(\"_\")\n image_id, anno_id = int(image_id), int(anno_id)\n if self.mode==\"train\":\n img_path = os.path.join(self.img_root_path, \"COCO_{}2014_{}.jpg\".format(self.mode, str(image_id).zfill(12)))\n elif self.mode==\"test\":\n img_path = os.path.join(self.img_root_path, \"COCO_val2014_{}.jpg\".format(str(image_id).zfill(12)))\n img = np.array(Image.open(img_path))\n if len(img.shape)==2:\n img = np.repeat(img[:, :, np.newaxis], 3, axis=2)\n instances = self.data_info[image_id][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, weight = segmentation[\"size\"]\n occlude_rate = instances[\"occlude_rate\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = 
(x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(weight, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n \n x_center_crop = x_center - x_min\n y_center_crop = y_center - y_min\n\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n center_crop = np.zeros_like(vm_crop[0])\n x_center_crop = int(x_center_crop*self.patch_h/h)\n y_center_crop = int(y_center_crop*self.patch_w/w)\n center_crop[x_center_crop: x_center_crop+1, y_center_crop: y_center_crop+1]=1\n center_crop = self.generate_heatmap(center_crop.astype(np.float), (35, 35), 9)\n center_crop = center_crop[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n center_crop = torch.from_numpy(np.array(center_crop)).to(self.dtype).to(self.device)\n \n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = 
torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n occlude_rate = torch.from_numpy(np.array(occlude_rate)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n \"img_no_crop\": img,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n \"occlude_rate\":occlude_rate,\n # # for vq\n # \"mask_crop\": fm_crop,\n \"img\": img,\n }\n return meta\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask" } ]
from data.dataloader_Fishbowl import FishBowl from data.dataloader_MOViD_A import MOViD_A from data.dataloader_KINS import Kins_Fusion_dataset, KINS_Aisformer_VRSP_Intersection from data.dataloader_COCOA import COCOA_Fusion_dataset, COCOA_VRSP
21,188
def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA":
def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA":
train_dataset = COCOA_Fusion_dataset(config, mode='train')
4
2023-12-21 04:25:47+00:00
24k
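(Reading aid, not part of the dump.) The rows above and below are raw records of this code-completion dataset; the following minimal sketch shows how one such record could be assembled into a next-line-prediction prompt and scored. The field names (context, import_statement, cropped_code, next_line, gold_snippet_index) and the context-entry keys (identifier, path, snippet) follow the schema visible in this dump; the prompt layout, the use of gold_snippet_index as an index into context, and the whitespace-insensitive match are illustrative assumptions, not the dataset's official tooling.

# Minimal sketch of consuming one record of this dump (assumptions noted above).
from typing import Any, Dict

def build_prompt(record: Dict[str, Any]) -> str:
    # Assumption: gold_snippet_index selects the relevant cross-file snippet
    # from the record's context list.
    gold = record["context"][record["gold_snippet_index"]]
    # Each context entry in this dump carries an identifier, a source path,
    # and the snippet text.
    header = "# retrieved from {}: {}\n{}\n".format(
        gold["path"], gold["identifier"], gold["snippet"]
    )
    # Concatenate retrieved context, the file's imports, and the cropped code
    # whose next line the model is asked to predict.
    return header + record["import_statement"] + "\n" + record["cropped_code"]

def exact_match(prediction: str, record: Dict[str, Any]) -> bool:
    # Whitespace-insensitive comparison against the reference next_line field.
    return prediction.strip() == record["next_line"].strip()

# Hypothetical usage with some model interface:
#   prompt = build_prompt(record)
#   print(exact_match(model.complete(prompt), record))

The next record follows unchanged.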
alipay/PainlessInferenceAcceleration
pia/lookahead/models/baichuan/modeling_baichuan.py
[ { "identifier": "LookaheadPreTrainedModel", "path": "pia/lookahead/common/pretrained_model.py", "snippet": "class LookaheadPreTrainedModel(PreTrainedModel):\n _batch_generation = False\n _stream_generation = False\n\n def __init__(self, config):\n super().__init__(config=config)\n\n def _get_generation_mode(\n self, generation_config: GenerationConfig, assistant_model: Optional[\"PreTrainedModel\"]\n ) -> GenerationMode:\n \"\"\"\n Returns the generation mode triggered by a [`GenerationConfig`] instance.\n \"\"\"\n if generation_config.constraints is not None or generation_config.force_words_ids is not None:\n generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH\n elif generation_config.num_beams == 1:\n if generation_config.do_sample is False:\n if (\n generation_config.top_k is not None\n and generation_config.top_k > 1\n and generation_config.penalty_alpha is not None\n and generation_config.penalty_alpha > 0\n ):\n generation_mode = GenerationMode.CONTRASTIVE_SEARCH\n elif generation_config.use_cache \\\n and hasattr(generation_config, 'decoding_kwargs') \\\n and generation_config.decoding_kwargs.get('use_lookahead', False) \\\n and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \\\n and generation_config.decoding_kwargs.get('branch_length', 12) > 0:\n generation_mode = GenerationMode.LOOKAHEAD_GENERATION\n else:\n generation_mode = GenerationMode.GREEDY_SEARCH\n else:\n if generation_config.use_cache \\\n and hasattr(generation_config, 'decoding_kwargs') \\\n and generation_config.decoding_kwargs.get('use_lookahead', False) \\\n and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \\\n and generation_config.decoding_kwargs.get('branch_length', 12) > 0:\n generation_mode = GenerationMode.LOOKAHEAD_GENERATION\n else:\n generation_mode = GenerationMode.SAMPLE\n else:\n if generation_config.num_beam_groups > 1:\n generation_mode = GenerationMode.GROUP_BEAM_SEARCH\n elif generation_config.do_sample is True:\n generation_mode = GenerationMode.BEAM_SAMPLE\n else:\n generation_mode = GenerationMode.BEAM_SEARCH\n\n # Assisted generation may extend some generation modes\n if assistant_model is not None:\n if generation_mode in (\"greedy_search\", \"sample\"):\n generation_mode = GenerationMode.ASSISTED_GENERATION\n else:\n raise ValueError(\n \"You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate \"\n \"is only supported with Greedy Search and Sample.\"\n )\n return generation_mode\n\n @torch.no_grad()\n def generate(\n self,\n inputs: Optional[torch.Tensor] = None,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n synced_gpus: Optional[bool] = None,\n assistant_model: Optional[\"PreTrainedModel\"] = None,\n streamer: Optional[\"BaseStreamer\"] = None,\n **kwargs,\n ) -> Union[GenerateOutput, torch.LongTensor]:\n r\"\"\"\n\n Generates sequences of token ids for models with a language modeling head.\n\n <Tip warning={true}>\n\n Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the\n model's default generation configuration. You can override any `generation_config` by passing the corresponding\n parameters to generate(), e.g. 
`.generate(inputs, num_beams=4, do_sample=True)`.\n\n For an overview of generation strategies and code examples, check out the [following\n guide](../generation_strategies).\n\n </Tip>\n\n Parameters:\n inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):\n The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the\n method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`\n should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of\n `input_ids`, `input_values`, `input_features`, or `pixel_values`.\n generation_config (`~generation.GenerationConfig`, *optional*):\n The generation configuration to be used as base parametrization for the generation call. `**kwargs`\n passed to generate matching the attributes of `generation_config` will override them. If\n `generation_config` is not provided, the default will be used, which had the following loading\n priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model\n configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s\n default values, whose documentation should be checked to parameterize generation.\n logits_processor (`LogitsProcessorList`, *optional*):\n Custom logits processors that complement the default logits processors built from arguments and\n generation config. If a logit processor is passed that is already created with the arguments or a\n generation config an error is thrown. This feature is intended for advanced users.\n stopping_criteria (`StoppingCriteriaList`, *optional*):\n Custom stopping criteria that complement the default stopping criteria built from arguments and a\n generation config. If a stopping criteria is passed that is already created with the arguments or a\n generation config an error is thrown. This feature is intended for advanced users.\n prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):\n If provided, this function constraints the beam search to allowed tokens only at each step. If not\n provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and\n `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned\n on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful\n for constrained generation conditioned on the prefix, as described in [Autoregressive Entity\n Retrieval](https://arxiv.org/abs/2010.00904).\n synced_gpus (`bool`, *optional*):\n Whether to continue running the while loop until max_length. Unless overridden this flag will be set to\n `True` under DeepSpeed ZeRO Stage 3 multiple GPUs environment to avoid hanging if one GPU finished\n generating before other GPUs. Otherwise it'll be set to `False`.\n assistant_model (`PreTrainedModel`, *optional*):\n An assistant model that can be used to accelerate generation. The assistant model must have the exact\n same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistent model\n is much faster than running generation with the model you're calling generate from. As such, the\n assistant model should be much smaller.\n streamer (`BaseStreamer`, *optional*):\n Streamer object that will be used to stream the generated sequences. 
Generated tokens are passed\n through `streamer.put(token_ids)` and the streamer is responsible for any further processing.\n kwargs (`Dict[str, Any]`, *optional*):\n Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be\n forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder\n specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.\n\n Return:\n [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`\n or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.\n\n If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible\n [`~utils.ModelOutput`] types are:\n\n - [`~generation.GreedySearchDecoderOnlyOutput`],\n - [`~generation.SampleDecoderOnlyOutput`],\n - [`~generation.BeamSearchDecoderOnlyOutput`],\n - [`~generation.BeamSampleDecoderOnlyOutput`]\n\n If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible\n [`~utils.ModelOutput`] types are:\n\n - [`~generation.GreedySearchEncoderDecoderOutput`],\n - [`~generation.SampleEncoderDecoderOutput`],\n - [`~generation.BeamSearchEncoderDecoderOutput`],\n - [`~generation.BeamSampleEncoderDecoderOutput`]\n \"\"\"\n\n if synced_gpus is None:\n # if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1:\n # synced_gpus = True\n # else:\n # synced_gpus = False\n synced_gpus = False\n\n # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call\n self._validate_model_class()\n\n # priority: `generation_config` argument > `model.generation_config` (the default generation config)\n if generation_config is None:\n # legacy: users may modify the model configuration to control generation -- update the generation config\n # model attribute accordingly, if it was created from the model config\n if self.generation_config._from_model_config:\n new_generation_config = GenerationConfig.from_model_config(self.config)\n if new_generation_config != self.generation_config:\n # warnings.warn(\n # \"You have modified the pretrained model configuration to control generation. This is a\"\n # \" deprecated strategy to control generation and will be removed soon, in a future version.\"\n # \" Please use a generation configuration file (see\"\n # \" https://huggingface.co/docs/transformers/main_classes/text_generation )\"\n # )\n self.generation_config = new_generation_config\n generation_config = self.generation_config\n\n generation_config = copy.deepcopy(generation_config)\n model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs\n generation_config.validate()\n self._validate_model_kwargs(model_kwargs.copy())\n if not hasattr(generation_config, 'decoding_kwargs'):\n generation_config.decoding_kwargs = model_kwargs.get('decoding_kwargs', {})\n\n # 2. Set generation parameters if not already defined\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:\n if model_kwargs.get(\"attention_mask\", None) is None:\n logger.warning(\n \"The attention mask and the pad token id were not set. As a consequence, you may observe \"\n \"unexpected behavior. 
Please pass your input's `attention_mask` to obtain reliable results.\"\n )\n eos_token_id = generation_config.eos_token_id\n if isinstance(eos_token_id, list):\n eos_token_id = eos_token_id[0]\n logger.warning(f\"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.\")\n generation_config.pad_token_id = eos_token_id\n\n # 3. Define model inputs\n # inputs_tensor has to be defined\n # model_input_name is defined if model-specific keyword input is passed\n # otherwise model_input_name is None\n # all model-specific keyword inputs are removed from `model_kwargs`\n inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(\n inputs, generation_config.bos_token_id, model_kwargs\n )\n batch_size = inputs_tensor.shape[0]\n\n # 4. Define other model kwargs\n model_kwargs[\"output_attentions\"] = generation_config.output_attentions\n model_kwargs[\"output_hidden_states\"] = generation_config.output_hidden_states\n # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are\n # generating the first new token or not, and we only want to use the embeddings for the first new token)\n if not self.config.is_encoder_decoder and model_input_name == \"inputs_embeds\":\n model_kwargs[\"use_cache\"] = True\n else:\n model_kwargs[\"use_cache\"] = generation_config.use_cache\n\n accepts_attention_mask = \"attention_mask\" in set(inspect.signature(self.forward).parameters.keys())\n requires_attention_mask = \"encoder_outputs\" not in model_kwargs\n\n if model_kwargs.get(\"attention_mask\", None) is None and requires_attention_mask and accepts_attention_mask:\n model_kwargs[\"attention_mask\"] = self._prepare_attention_mask_for_generation(\n inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id\n )\n\n # decoder-only models should use left-padding for generation\n if not self.config.is_encoder_decoder:\n # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`\n # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.\n if (\n generation_config.pad_token_id is not None\n and len(inputs_tensor.shape) == 2\n and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0\n ):\n logger.warning(\n \"A decoder-only architecture is being used, but right-padding was detected! For correct \"\n \"generation results, please set `padding_side='left'` when initializing the tokenizer.\"\n )\n\n if self.config.is_encoder_decoder and \"encoder_outputs\" not in model_kwargs:\n # if model is encoder decoder encoder_outputs are created\n # and added to `model_kwargs`\n model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(\n inputs_tensor, model_kwargs, model_input_name\n )\n\n # 5. Prepare `input_ids` which will be used for auto-regressive generation\n if self.config.is_encoder_decoder:\n input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(\n batch_size=batch_size,\n model_input_name=model_input_name,\n model_kwargs=model_kwargs,\n decoder_start_token_id=generation_config.decoder_start_token_id,\n bos_token_id=generation_config.bos_token_id,\n device=inputs_tensor.device,\n )\n else:\n input_ids = inputs_tensor if model_input_name == \"input_ids\" else model_kwargs.pop(\"input_ids\")\n\n if streamer is not None:\n streamer.put(input_ids.cpu())\n\n # 6. 
Prepare `max_length` depending on other stopping criteria.\n input_ids_length = input_ids.shape[-1]\n has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n if generation_config.max_new_tokens is not None:\n if not has_default_max_length:\n logger.warning(\n f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n \"Please refer to the documentation for more information. \"\n \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\"\n )\n generation_config.max_length = generation_config.max_new_tokens + input_ids_length\n\n # 7. determine generation mode\n generation_mode = self._get_generation_mode(generation_config, assistant_model)\n\n if streamer is not None and (generation_config.num_beams > 1):\n raise ValueError(\n \"`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1.\"\n )\n\n if self.device.type != input_ids.device.type:\n warnings.warn(\n \"You are calling .generate() with the `input_ids` being on a device type different\"\n f\" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model\"\n f\" is on {self.device.type}. You may experience unexpected behaviors or slower generation.\"\n \" Please make sure that you have put `input_ids` to the\"\n f\" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before\"\n \" running `.generate()`.\",\n UserWarning,\n )\n\n # 8. prepare distribution pre_processing samplers\n logits_processor = self._get_logits_processor(\n generation_config=generation_config,\n input_ids_seq_length=input_ids_length,\n encoder_input_ids=inputs_tensor,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n logits_processor=logits_processor,\n )\n\n # 9. prepare stopping criteria\n stopping_criteria = self._get_stopping_criteria(\n generation_config=generation_config, stopping_criteria=stopping_criteria\n )\n\n decoding_kwargs = generation_config.decoding_kwargs if hasattr(generation_config, 'decoding_kwargs') else {}\n decoding_kwargs['generation_mode'] = generation_mode\n decoding_kwargs['do_sample'] = generation_config.do_sample\n decoding_kwargs['inputs_embeds_position'] = generation_config.inputs_embeds_position if hasattr(generation_config, 'inputs_embeds_position') else 0\n decoding_kwargs['max_length'] = generation_config.max_length\n if generation_mode == GenerationMode.LOOKAHEAD_GENERATION:\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n decoding_kwargs['decoding_max_length'] = generation_config.max_length + decoding_length + 1\n else:\n decoding_kwargs['decoding_max_length'] = generation_config.max_length\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n\n # 10. go into different generation modes\n if generation_mode == GenerationMode.ASSISTED_GENERATION:\n if generation_config.num_return_sequences > 1:\n raise ValueError(\n \"num_return_sequences has to be 1 when doing assisted generate, \"\n f\"but is {generation_config.num_return_sequences}.\"\n )\n if batch_size > 1:\n raise ValueError(\"assisted generate is only supported for batch_size = 1\")\n if not model_kwargs[\"use_cache\"]:\n raise ValueError(\"assisted generate requires `use_cache=True`\")\n\n # 11. 
If the assistant model is an encoder-decoder, prepare its encoder outputs\n if assistant_model.config.is_encoder_decoder:\n assistant_model_kwargs = copy.deepcopy(model_kwargs)\n inputs_tensor, model_input_name, assistant_model_kwargs = assistant_model._prepare_model_inputs(\n inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_model_kwargs\n )\n assistant_model_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(\n inputs_tensor, assistant_model_kwargs, model_input_name\n )\n model_kwargs[\"assistant_encoder_outputs\"] = assistant_model_kwargs[\"encoder_outputs\"]\n\n # 12. run assisted generate\n return self.assisted_decoding(\n input_ids,\n assistant_model=assistant_model,\n do_sample=generation_config.do_sample,\n logits_processor=logits_processor,\n logits_warper=self._get_logits_warper(generation_config) if generation_config.do_sample else None,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n if generation_mode == GenerationMode.GREEDY_SEARCH:\n # 11. run greedy search\n return self.greedy_search(\n input_ids,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.LOOKAHEAD_GENERATION:\n # 11. run greedy search\n return self.lookahead_generation(\n input_ids,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.CONTRASTIVE_SEARCH:\n if not model_kwargs[\"use_cache\"]:\n raise ValueError(\"Contrastive search requires `use_cache=True`\")\n\n return self.contrastive_search(\n input_ids,\n top_k=generation_config.top_k,\n penalty_alpha=generation_config.penalty_alpha,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n sequential=generation_config.low_memory,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.SAMPLE:\n # 11. prepare logits warper\n logits_warper = self._get_logits_warper(generation_config)\n\n # 12. expand input_ids with `num_return_sequences` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_return_sequences,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n\n # 13. 
run sample\n return self.sample(\n input_ids,\n logits_processor=logits_processor,\n logits_warper=logits_warper,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.BEAM_SEARCH:\n # 11. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n # 12. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. run beam search\n return self.beam_search(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.BEAM_SAMPLE:\n # 11. prepare logits warper\n logits_warper = self._get_logits_warper(generation_config)\n\n # 12. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n\n # 13. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n\n # 14. run beam sample\n return self.beam_sample(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n logits_warper=logits_warper,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.GROUP_BEAM_SEARCH:\n # 11. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n num_beam_groups=generation_config.num_beam_groups,\n max_length=generation_config.max_length,\n )\n # 12. 
interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. run beam search\n return self.group_beam_search(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.CONSTRAINED_BEAM_SEARCH:\n final_constraints = []\n if generation_config.constraints is not None:\n final_constraints = generation_config.constraints\n\n if generation_config.force_words_ids is not None:\n\n def typeerror():\n raise ValueError(\n \"`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`\"\n f\"of positive integers, but is {generation_config.force_words_ids}.\"\n )\n\n if (\n not isinstance(generation_config.force_words_ids, list)\n or len(generation_config.force_words_ids) == 0\n ):\n typeerror()\n\n for word_ids in generation_config.force_words_ids:\n if isinstance(word_ids[0], list):\n if not isinstance(word_ids, list) or len(word_ids) == 0:\n typeerror()\n if any(not isinstance(token_ids, list) for token_ids in word_ids):\n typeerror()\n if any(\n any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)\n for token_ids in word_ids\n ):\n typeerror()\n\n constraint = DisjunctiveConstraint(word_ids)\n else:\n if not isinstance(word_ids, list) or len(word_ids) == 0:\n typeerror()\n if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids):\n typeerror()\n\n constraint = PhrasalConstraint(word_ids)\n final_constraints.append(constraint)\n\n # 11. prepare beam search scorer\n constrained_beam_scorer = ConstrainedBeamSearchScorer(\n constraints=final_constraints,\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n # 12. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. 
run beam search\n return self.constrained_beam_search(\n input_ids,\n constrained_beam_scorer=constrained_beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n def lookahead_prepare_inputs_for_generation(self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n **kwargs):\n position_ids = kwargs.get(\"position_ids\", None)\n\n decoding_kwargs = kwargs.get('decoding_kwargs', {})\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n branch_length = decoding_kwargs.get('branch_length', 12)\n decoding_mode = decoding_kwargs.get('decoding_mode', 'hier')\n max_length = decoding_kwargs.get('max_length', 2048)\n update_branch_length = min(branch_length, max_length - input_ids.size(-1))\n assert update_branch_length > 0, f'{branch_length=} {max_length=} {input_ids.size(-1)=} {update_branch_length=}'\n\n if past_key_values is None:\n if inputs_embeds is not None and input_ids is not None:\n model_inputs = {\"inputs_embeds\": inputs_embeds, \"input_ids\": input_ids}\n length = input_ids.size(1)\n elif input_ids is not None:\n model_inputs = {\"input_ids\": input_ids}\n length = input_ids.size(1)\n elif inputs_embeds is not None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n length = input_ids.size(1)\n else:\n raise ValueError('either input_ids or inputs_embeds is not None')\n update_attention_mask = attention_mask[:, :, :length, :length]\n\n model_inputs.update(\n {\"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": update_attention_mask,\n \"decoding_kwargs\": decoding_kwargs\n })\n\n if position_ids is not None:\n model_inputs[\"position_ids\"] = self._get_position_ids(position_ids, encoding=True, length=length)\n\n else:\n decoding_qids = input_ids[0, -2:].tolist()\n # decoding_qids = decoding_kwargs['input_id_list'][0][-2:]\n min_input_size = 0\n min_output_size = max(decoding_length // 2, 1)\n\n if decoding_mode in ('hier', 'par', 'one'):\n decoding_mode = decoding_mode + '_mix'\n fmt, mode = decoding_mode.split('_')\n method_name = fmt + '_get'\n\n decoding_ids, decoding_masks, sizes = getattr(self.lookahead_cache, method_name)(decoding_qids,\n decoding_length=decoding_length,\n branch_length=update_branch_length,\n min_input_size=min_input_size,\n min_output_size=min_output_size,\n mode=mode,\n idx=0)\n\n decoding_input_ids = torch.tensor([decoding_ids], dtype=torch.long, device=input_ids.device)\n prefix_length = input_ids.size(-1) - 1\n fresh_length = len(decoding_ids)\n ppl = prefix_length + fresh_length\n assert ppl <= attention_mask.size(2), \\\n f'{max_length=} {update_branch_length=} {prefix_length=} {fresh_length=} {attention_mask.shape=}'\n prefix_mask_tensor = attention_mask[:, :, prefix_length:ppl, :prefix_length]\n decoding_mask_tensor = torch.from_numpy(decoding_masks[None, None]).to(\n dtype=attention_mask.dtype, device=attention_mask.device)\n decoding_attention_mask = torch.cat([prefix_mask_tensor, decoding_mask_tensor], dim=3)\n\n decoding_kwargs.update({'decoding_qids': decoding_qids,\n 'decoding_ids': decoding_ids,\n 'decoding_masks': decoding_masks,\n 'sizes': sizes,\n })\n model_inputs = {'decoding_kwargs': decoding_kwargs}\n\n model_inputs.update(\n {\n \"input_ids\": 
decoding_input_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": decoding_attention_mask\n }\n )\n if position_ids is not None:\n indices = torch.sum(decoding_attention_mask, dim=3).squeeze(1)[0]\n model_inputs[\"position_ids\"] = self._get_position_ids(position_ids, indices=indices, encoding=False)\n\n return model_inputs\n\n def _get_position_ids(self, full_position_ids, indices=None, length=None, encoding=True):\n if encoding:\n return full_position_ids[..., :length]\n else:\n return full_position_ids[..., indices]\n\n def _lookahead_update_model_kwargs_for_generation(\n self,\n outputs: ModelOutput,\n model_kwargs: Dict[str, Any],\n is_encoder_decoder: bool = False,\n standardize_cache_format: bool = False,\n logits_processor: Optional[LogitsProcessorList] = None,\n input_ids: Optional[torch.Tensor] = None,\n ) -> Dict[str, Any]:\n # update past_key_values\n model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n outputs, standardize_cache_format=standardize_cache_format\n )\n\n decoding_kwargs = model_kwargs['decoding_kwargs']\n decoding_ids = decoding_kwargs.get('decoding_ids', [])\n if len(decoding_ids) <= 1:\n next_token_logits = outputs.logits[:, -1:, :]\n # pre-process distribution\n # next_tokens_scores = logits_processor(input_ids, next_token_logits)\n bs, nt, nv = next_token_logits.shape\n next_tokens_scores = logits_processor(input_ids, next_token_logits.squeeze(1)).unsqueeze(1)\n\n if decoding_kwargs.get('do_sample', False):\n probs = nn.functional.softmax(next_tokens_scores, dim=-1)\n next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)\n else:\n next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()\n model_kwargs['next_tokens'] = next_tokens\n model_kwargs['next_tokens_scores'] = next_tokens_scores\n next_token_list = next_tokens.tolist()\n model_kwargs['next_token_list'] = next_token_list\n decoding_kwargs['input_id_list'][0].extend(next_token_list[0])\n decoding_kwargs['dls'].append(1)\n decoding_kwargs['edls'].append(1)\n if decoding_kwargs.get('debug_lookahead', False):\n decoding_qids = decoding_kwargs.get('decoding_qids', [])\n print(f'size:0 query:{decoding_qids} next_token:{next_token_list[0]}')\n else:\n # TODO: accurate logit_processor\n # next_tokens_scores = logits_processor(input_ids, outputs.logits)\n bs, nt, nv = outputs.logits.shape\n next_tokens_scores = logits_processor(input_ids.repeat(1, nt).view(bs * nt, -1),\n outputs.logits.view(bs * nt, -1)).view(bs, nt, -1)\n\n if decoding_kwargs.get('do_sample', False):\n probs = nn.functional.softmax(next_tokens_scores, dim=-1)\n bs, nt, nv = probs.shape\n next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)\n else:\n next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()\n\n next_token_list = next_tokens.tolist()[0]\n decoding_ids = decoding_kwargs['decoding_ids'][1:]\n decoding_mask = decoding_kwargs['decoding_masks']\n sizes = decoding_kwargs['sizes']\n\n max_match_index = 0\n max_match_count = 0\n max_decoding_ids_slice = None\n max_next_token_slice = None\n \n for i in range(len(decoding_ids)):\n mask_indices = np.nonzero(decoding_mask[i + 1, 1:])[0]\n decoding_ids_slice = [decoding_ids[j] for j in mask_indices] \n next_token_slice = [next_token_list[0]] + [next_token_list[j + 1] for j in mask_indices]\n \n c = len(decoding_ids_slice)\n for j, p in enumerate(decoding_ids_slice):\n if next_token_slice[j] != p:\n c = j\n 
break\n if c > max_match_count:\n max_match_count = c\n max_match_index = i\n if c >= max_match_count:\n max_decoding_ids_slice = decoding_ids_slice\n max_next_token_slice = next_token_slice\n # if decoding_kwargs['eos'] in decoding_ids:\n # max_match_count = 0\n\n prefix_plus_count = input_ids.size(-1)\n match_idx = np.nonzero(decoding_mask[max_match_index + 1, 1:])[0][:max_match_count]\n if len(decoding_ids) != max_match_count:\n past = model_kwargs[\"past_key_values\"]\n device = past[0][0].device\n kv_idx = torch.tensor(match_idx + prefix_plus_count, dtype=torch.long, device=device)\n model_kwargs[\"past_key_values\"] = self._update_cache(past,\n kv_idx,\n prefix_and_next_count=prefix_plus_count,\n max_match_count=max_match_count,\n max_match_index=max_match_index)\n\n next_token_list = [next_token_list[0:1] + [next_token_list[x + 1] for x in match_idx]]\n next_tokens = torch.tensor(next_token_list, dtype=torch.long, device=input_ids.device)\n model_kwargs['next_tokens'] = next_tokens\n model_kwargs['next_token_list'] = next_token_list\n decoding_kwargs['input_id_list'][0].extend(next_token_list[0])\n decoding_kwargs['dls'].append(len(decoding_ids))\n decoding_kwargs['edls'].append(max_match_count + 1)\n if decoding_kwargs.get('debug_lookahead', False):\n lengths = np.sum(decoding_mask, axis=1) - 1\n l = np.concatenate([lengths[:-1][(lengths[1:] - lengths[:-1]) <= 0], lengths[-1:]], axis=0)\n ls = ','.join(l.astype(np.str_))\n decoding_qids = decoding_kwargs['decoding_qids']\n size_str = ','.join([str(x) for x in sizes])\n print(\n f'decoding_length:{len(decoding_ids)+1} accept_length:{max_match_count+1} '\n f'query:{decoding_qids} source:{size_str} lengths:{ls} index:{max_match_index} '\n f'branch_token:{max_decoding_ids_slice} next_token:{max_next_token_slice}')\n\n return model_kwargs\n\n def _update_cache(self, past_key_values, kv_idx, prefix_and_next_count=None, max_match_count=None,\n max_match_index=None):\n update_past_key_values = []\n for k, v in past_key_values:\n if max_match_index + 1 == max_match_count:\n k = k[:, :, :prefix_and_next_count + max_match_count]\n v = v[:, :, :prefix_and_next_count + max_match_count]\n else:\n k = torch.concat([k[:, :, :prefix_and_next_count], k[:, :, kv_idx]], 2)\n v = torch.concat([v[:, :, :prefix_and_next_count], v[:, :, kv_idx]], 2)\n update_past_key_values.append((k, v))\n return tuple(update_past_key_values)\n\n def lookahead_generation(\n self,\n input_ids: torch.LongTensor,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n max_length: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[Union[int, List[int]]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_scores: Optional[bool] = None,\n return_dict_in_generate: Optional[bool] = None,\n synced_gpus: bool = False,\n streamer: Optional[\"BaseStreamer\"] = None,\n **model_kwargs,\n ) -> Union[GreedySearchOutput, torch.LongTensor]:\n r\"\"\"\n Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be\n used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.\n\n <Tip warning={true}>\n\n In most cases, you do not need to call [`~generation.GenerationMixin.greedy_search`] directly. Use generate()\n instead. 
For an overview of generation strategies and code examples, check the [following\n guide](../generation_strategies).\n\n </Tip>\n\n\n Parameters:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n The sequence used as a prompt for the generation.\n logits_processor (`LogitsProcessorList`, *optional*):\n An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]\n used to modify the prediction scores of the language modeling head applied at each generation step.\n stopping_criteria (`StoppingCriteriaList`, *optional*):\n An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]\n used to tell if the generation loop should stop.\n\n max_length (`int`, *optional*, defaults to 20):\n **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated\n tokens. The maximum length of the sequence to be generated.\n pad_token_id (`int`, *optional*):\n The id of the *padding* token.\n eos_token_id (`Union[int, List[int]]`, *optional*):\n The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more details.\n output_hidden_states (`bool`, *optional*, defaults to `False`):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more details.\n output_scores (`bool`, *optional*, defaults to `False`):\n Whether or not to return the prediction scores. See `scores` under returned tensors for more details.\n return_dict_in_generate (`bool`, *optional*, defaults to `False`):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n synced_gpus (`bool`, *optional*, defaults to `False`):\n Whether to continue running the while loop until max_length (needed for ZeRO stage 3)\n streamer (`BaseStreamer`, *optional*):\n Streamer object that will be used to stream the generated sequences. Generated tokens are passed\n through `streamer.put(token_ids)` and the streamer is responsible for any further processing.\n model_kwargs:\n Additional model specific keyword arguments will be forwarded to the `forward` function of the model.\n If model is an encoder-decoder model the kwargs should include `encoder_outputs`.\n\n Return:\n [`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or\n `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a\n [`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and\n `return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if\n `model.config.is_encoder_decoder=True`.\n\n Examples:\n\n ```python\n >>> from transformers import (\n ... AutoTokenizer,\n ... AutoModelForCausalLM,\n ... LogitsProcessorList,\n ... MinLengthLogitsProcessor,\n ... StoppingCriteriaList,\n ... MaxLengthCriteria,\n ... 
)\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n\n >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token\n >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id\n\n >>> input_prompt = \"It might be possible to\"\n >>> input_ids = tokenizer(input_prompt, return_tensors=\"pt\").input_ids\n\n >>> # instantiate logits processors\n >>> logits_processor = LogitsProcessorList(\n ... [\n ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id),\n ... ]\n ... )\n >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])\n\n >>> outputs = model.greedy_search(\n ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria\n ... )\n\n >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)\n [\"It might be possible to get a better understanding of the nature of the problem, but it's not\"]\n ```\"\"\"\n # init values\n\n if not hasattr(self, 'lookahead_cache'):\n self.lookahead_cache = LookaheadCache()\n\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n if max_length is not None:\n warnings.warn(\n \"`max_length` is deprecated in this function, use\"\n \" `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.\",\n UserWarning,\n )\n stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)\n pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id\n if isinstance(eos_token_id, int):\n eos_token_id = [eos_token_id]\n eos_token_id_tensor = torch.tensor(eos_token_id, device=input_ids.device) if eos_token_id is not None else None\n output_scores = output_scores if output_scores is not None else self.generation_config.output_scores\n output_attentions = (\n output_attentions if output_attentions is not None else self.generation_config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states\n )\n return_dict_in_generate = (\n return_dict_in_generate\n if return_dict_in_generate is not None\n else self.generation_config.return_dict_in_generate\n )\n\n # init attention / hidden states / scores tuples\n scores = () if (return_dict_in_generate and output_scores) else None\n decoder_attentions = () if (return_dict_in_generate and output_attentions) else None\n cross_attentions = () if (return_dict_in_generate and output_attentions) else None\n decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None\n\n # if model is an encoder-decoder, retrieve encoder attention weights and hidden states\n if return_dict_in_generate and self.config.is_encoder_decoder:\n encoder_attentions = model_kwargs[\"encoder_outputs\"].get(\"attentions\") if output_attentions else None\n encoder_hidden_states = (\n model_kwargs[\"encoder_outputs\"].get(\"hidden_states\") if output_hidden_states else None\n )\n\n decoding_kwargs = model_kwargs['decoding_kwargs']\n decoding_kwargs.update({\n 'eos': eos_token_id[0] if eos_token_id is not None else 2,\n 'edls': [],\n 'dls': [],\n 'fts': []\n })\n\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n 
stop_max_length = stopping_criteria.max_length\n decoding_max_length = stop_max_length + decoding_length + 1\n attention_mask = model_kwargs.get('attention_mask', None)\n input_device = input_ids.device\n if attention_mask is None:\n bs = input_ids.size(0)\n full_attention_mask = torch.tril(\n torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long, device=input_device),\n 0)\n elif len(attention_mask.shape) == 2:\n # from [bs, src_len] to [bs,1,max_len,max_len]\n bs, src_len = attention_mask.shape\n pad_len = decoding_max_length - src_len\n attention_mask = attention_mask.long()\n if pad_len > 0:\n pad_mask = torch.ones((bs, pad_len), dtype=torch.long, device=attention_mask.device)\n attention_mask = torch.cat([attention_mask, pad_mask], 1)\n full_attention_mask = torch.tril(attention_mask[:, None, None].expand(-1, -1, decoding_max_length, -1), 0)\n elif len(attention_mask.shape) == 4:\n bs, _, src_len, tgt_len = attention_mask.shape\n attention_mask = attention_mask.long()\n if src_len < decoding_max_length or tgt_len < decoding_max_length:\n full_attention_mask = torch.tril(\n torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long,\n device=input_device),\n 0)\n full_attention_mask[:, :, :src_len, :tgt_len] = attention_mask\n else:\n full_attention_mask = attention_mask\n else:\n raise ValueError(f'unsupport attention_mask.shape:{attention_mask.shape}')\n model_kwargs['attention_mask'] = full_attention_mask\n decoding_kwargs['max_length'] = stop_max_length\n decoding_kwargs['decoding_max_length'] = decoding_max_length\n\n # keep track of which sequences are already finished\n unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device)\n\n assert input_ids.size(0) == 1\n input_id_list = input_ids[0].tolist()\n decoding_kwargs['input_id_list'] = [input_id_list]\n branch_length = decoding_kwargs.get('branch_length', 12)\n self.lookahead_cache.put(input_id_list[1:], branch_length=branch_length + 1, mode='input', idx=0)\n ts = time.time()\n\n this_peer_finished = False # used by synced_gpus only\n while True:\n if synced_gpus:\n # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.\n # The following logic allows an early break if all peers finished generating their sequence\n this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)\n # send 0.0 if we finished, 1.0 otherwise\n dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)\n # did all peers finish? 
the reduced sum will be 0.0 then\n if this_peer_finished_flag.item() == 0.0:\n break\n\n # prepare model inputs\n model_inputs = self.lookahead_prepare_inputs_for_generation(input_ids, **model_kwargs)\n decoding_kwargs = model_inputs.pop('decoding_kwargs', {})\n\n # forward pass to get next token\n outputs = self(\n **model_inputs,\n return_dict=True,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n if synced_gpus and this_peer_finished:\n continue # don't waste resources running the code we don't need\n\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n model_kwargs = self._lookahead_update_model_kwargs_for_generation(\n outputs,\n model_kwargs,\n is_encoder_decoder=self.config.is_encoder_decoder,\n input_ids=input_ids,\n logits_processor=logits_processor\n )\n\n next_tokens = model_kwargs['next_tokens']\n next_tokens_scores = model_kwargs['next_tokens_scores']\n next_token_list = model_kwargs['next_token_list']\n\n # finished sentences should have their next token be a padding token\n if eos_token_id is not None:\n if pad_token_id is None:\n raise ValueError(\"If `eos_token_id` is defined, make sure that `pad_token_id` is defined.\")\n next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)\n\n # update generated ids, model inputs, and length for next step\n input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n if streamer is not None:\n streamer.put(next_token_list)\n\n self.lookahead_cache.stream_put(next_token_list[0], branch_length=branch_length + 1, final=False,\n mode='output', idx=0)\n\n # Store scores, attentions and hidden_states when required\n if return_dict_in_generate:\n if output_scores:\n scores += (next_tokens_scores,)\n if output_attentions:\n decoder_attentions += (\n (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)\n )\n if self.config.is_encoder_decoder:\n cross_attentions += (outputs.cross_attentions,)\n\n if output_hidden_states:\n decoder_hidden_states += (\n (outputs.decoder_hidden_states,)\n if self.config.is_encoder_decoder\n else (outputs.hidden_states,)\n )\n\n # if eos_token was found in one sentence, set sentence to finished\n if eos_token_id_tensor is not None:\n # unfinished_sequences = unfinished_sequences.mul(\n # next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)\n # )\n unfinished_sequences = unfinished_sequences.mul(\n next_tokens[:, :, None].ne(eos_token_id_tensor).prod(dim=2).prod(dim=1))\n\n # stop when each sentence is finished\n if unfinished_sequences.max() == 0:\n this_peer_finished = True\n\n # stop if we exceed the maximum length\n if stopping_criteria(input_ids, scores):\n this_peer_finished = True\n\n te = time.time()\n model_kwargs['decoding_kwargs']['fts'].append(te - ts)\n ts = te\n if this_peer_finished and not synced_gpus:\n self.lookahead_cache.stream_put([], branch_length=branch_length + 1, final=True,\n mode='output', idx=0)\n break\n\n if streamer is not None:\n streamer.end()\n\n if return_dict_in_generate:\n if self.config.is_encoder_decoder:\n return GreedySearchEncoderDecoderOutput(\n sequences=input_ids,\n scores=scores,\n encoder_attentions=encoder_attentions,\n encoder_hidden_states=encoder_hidden_states,\n decoder_attentions=decoder_attentions,\n cross_attentions=cross_attentions,\n decoder_hidden_states=decoder_hidden_states,\n )\n else:\n kwargs = {'dls': model_kwargs['decoding_kwargs']['dls'],\n 'edls': model_kwargs['decoding_kwargs']['edls'],\n 
'fts': model_kwargs['decoding_kwargs']['fts']}\n return LookaheadDecoderOnlyOutput(\n sequences=input_ids,\n scores=scores,\n attentions=decoder_attentions,\n hidden_states=decoder_hidden_states,\n kwargs=kwargs\n )\n else:\n return input_ids\n\n def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):\n \"\"\"Validates model kwargs for generation. Generate argument typos will also be caught here.\"\"\"\n # Excludes arguments that are handled before calling any model function\n if self.config.is_encoder_decoder:\n for key in [\"decoder_input_ids\"]:\n model_kwargs.pop(key, None)\n\n unused_model_args = []\n model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)\n # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If\n # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)\n if \"kwargs\" in model_args or \"model_kwargs\" in model_args:\n model_args |= set(inspect.signature(self.forward).parameters)\n\n # Encoder-Decoder models may also need Encoder arguments from `model_kwargs`\n if self.config.is_encoder_decoder:\n base_model = getattr(self, self.base_model_prefix, None)\n\n # allow encoder kwargs\n encoder = getattr(self, \"encoder\", None)\n # `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`.\n # Also, it has `base_model_prefix = \"encoder_decoder\"` but there is no `self.encoder_decoder`\n # TODO: A better way to handle this.\n if encoder is None and base_model is not None:\n encoder = getattr(base_model, \"encoder\", None)\n\n if encoder is not None:\n encoder_model_args = set(inspect.signature(encoder.forward).parameters)\n model_args |= encoder_model_args\n\n # allow decoder kwargs\n decoder = getattr(self, \"decoder\", None)\n if decoder is None and base_model is not None:\n decoder = getattr(base_model, \"decoder\", None)\n\n if decoder is not None:\n decoder_model_args = set(inspect.signature(decoder.forward).parameters)\n model_args |= {f\"decoder_{x}\" for x in decoder_model_args}\n\n decoding_kwargs = ['decoding_kwargs','stop_words_ids']\n for key, value in model_kwargs.items():\n if value is not None and key not in model_args and key not in decoding_kwargs:\n unused_model_args.append(key)\n\n if unused_model_args:\n raise ValueError(\n f\"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the\"\n \" generate arguments will also show up in this list)\"\n )" }, { "identifier": "BaichuanConfig", "path": "pia/lookahead/models/baichuan/configuration_baichuan.py", "snippet": "class BaichuanConfig(PretrainedConfig):\n model_type = \"baichuan\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=125696,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n hidden_act=\"silu\",\n max_position_embeddings=4096,\n initializer_range=0.02,\n rms_norm_eps=1e-6,\n use_cache=True,\n pad_token_id=0,\n bos_token_id=1,\n eos_token_id=2,\n tie_word_embeddings=False,\n z_loss_weight=0,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.use_cache = use_cache\n self.z_loss_weight = 
z_loss_weight\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n tie_word_embeddings=tie_word_embeddings,\n **kwargs,\n )" }, { "identifier": "build_chat_input", "path": "pia/lookahead/models/baichuan/generation_utils.py", "snippet": "def build_chat_input(model, tokenizer, messages: List[dict], max_new_tokens: int = 0):\n def _parse_messages(messages, split_role=\"user\"):\n system, rounds = \"\", []\n round = []\n for i, message in enumerate(messages):\n if message[\"role\"] == \"system\":\n assert i == 0\n system = message[\"content\"]\n continue\n if message[\"role\"] == split_role and round:\n rounds.append(round)\n round = []\n round.append(message)\n if round:\n rounds.append(round)\n return system, rounds\n\n max_new_tokens = max_new_tokens or model.generation_config.max_new_tokens\n max_input_tokens = model.config.model_max_length - max_new_tokens\n system, rounds = _parse_messages(messages, split_role=\"user\")\n system_tokens = tokenizer.encode(system)\n max_history_tokens = max_input_tokens - len(system_tokens)\n\n history_tokens = []\n for round in rounds[::-1]:\n round_tokens = []\n for message in round:\n if message[\"role\"] == \"user\":\n round_tokens.append(model.generation_config.user_token_id)\n else:\n round_tokens.append(model.generation_config.assistant_token_id)\n round_tokens.extend(tokenizer.encode(message[\"content\"]))\n if len(history_tokens) == 0 or len(history_tokens) + len(round_tokens) <= max_history_tokens:\n history_tokens = round_tokens + history_tokens # concat left\n if len(history_tokens) < max_history_tokens:\n continue\n break\n\n input_tokens = system_tokens + history_tokens\n if messages[-1][\"role\"] != \"assistant\":\n input_tokens.append(model.generation_config.assistant_token_id)\n input_tokens = input_tokens[-max_input_tokens:] # truncate left\n return torch.LongTensor([input_tokens]).to(model.device)" }, { "identifier": "TextIterStreamer", "path": "pia/lookahead/models/baichuan/generation_utils.py", "snippet": "class TextIterStreamer:\n def __init__(self, tokenizer, skip_prompt=False, skip_special_tokens=False):\n self.tokenizer = tokenizer\n self.skip_prompt = skip_prompt\n self.skip_special_tokens = skip_special_tokens\n self.tokens = []\n self.text_queue = Queue()\n self.next_tokens_are_prompt = True\n\n def put(self, value):\n if self.skip_prompt and self.next_tokens_are_prompt:\n self.next_tokens_are_prompt = False\n else:\n if len(value.shape) > 1:\n value = value[0]\n self.tokens.extend(value.tolist())\n self.text_queue.put(\n self.tokenizer.decode(self.tokens, skip_special_tokens=self.skip_special_tokens))\n\n def end(self):\n self.text_queue.put(None)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n value = self.text_queue.get()\n if value is None:\n raise StopIteration()\n else:\n return value" } ]
import math import os import torch import torch.utils.checkpoint from contextlib import contextmanager from threading import Thread from typing import List, Optional, Tuple, Union from torch import nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from transformers import PretrainedConfig from transformers.activations import ACT2FN from transformers.generation.utils import GenerationConfig from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from transformers.utils import logging, ContextManagers from pia.lookahead.common.pretrained_model import LookaheadPreTrainedModel from pia.lookahead.models.baichuan.configuration_baichuan import BaichuanConfig from pia.lookahead.models.baichuan.generation_utils import build_chat_input, TextIterStreamer from xformers import ops as xops from .quantizer import quantize_offline, init_model_weight_int4 from .quantizer import init_model_weight_int4 from accelerate import init_empty_weights, dispatch_model, infer_auto_device_map from accelerate.utils import CustomDtype from accelerate.utils import get_balanced_memory from .quantizer import quantize_online
18125
if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." ) self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() proj = self.W_pack(hidden_states) proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2) query_states = proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if xops is not None and self.training: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask() ) else: with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True): attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class DecoderLayer(nn.Module): def __init__(self, config: BaichuanConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Attention(config=config) self.mlp = MLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = 
self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs
# Copyright 2023 Baichuan Inc. All Rights Reserved. # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) try: except ImportError: xops = None logger.warning( "Xformers is not installed correctly. If you want to use memory_efficient_attention to accelerate training use the following command to install Xformers\npip install xformers." ) # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" if len(mask.size()) == 3: bsz, src_len, _ = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, :, :].expand(bsz, 1, tgt_len, src_len).to(dtype) else: bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class RMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ RMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states class RotaryEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) self.max_seq_len_cached = max_position_embeddings t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32) self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. 
if seq_len > self.max_seq_len_cached: self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=torch.float32) freqs = torch.outer(t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1) self.cos_cached = emb.cos()[None, None, :, :].to(torch.float32).to(x.device) self.sin_cached = emb.sin()[None, None, :, :].to(torch.float32).to(x.device) elif self.cos_cached.device != x.device: self.cos_cached = self.cos_cached.to(x.device) self.sin_cached = self.sin_cached.to(x.device) return ( self.cos_cached[:, :, :seq_len, ...], self.sin_cached[:, :, :seq_len, ...], ) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos_, sin_, position_ids): cos = cos_.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin_.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q.float() * cos) + (rotate_half(q.float()) * sin) k_embed = (k.float() * cos) + (rotate_half(k.float()) * sin) return q_embed.to(q.dtype), k_embed.to(k.dtype) class MLP(nn.Module): def __init__( self, hidden_size: int, intermediate_size: int, hidden_act: str, ): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.act_fn = ACT2FN[hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class Attention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: BaichuanConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = RotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() proj = self.W_pack(hidden_states) proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2) query_states = proj[0].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = proj[1].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = proj[2].view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None if xops is not None and self.training: attn_weights = None query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) attn_output = xops.memory_efficient_attention( query_states, key_states, value_states, attn_bias=xops.LowerTriangularMask() ) else: with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=True): attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class DecoderLayer(nn.Module): def __init__(self, config: BaichuanConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = Attention(config=config) self.mlp = MLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, ) self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, 
position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs
class BaichuanPreTrainedModel(LookaheadPreTrainedModel):
0
2023-12-19 13:11:38+00:00
24k
MingtaoGuo/AnimateAnyone_unofficial
aldm/aldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w 
-> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "SpatialTransformerPlus", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformerPlus(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True, use_temporal_attention=False):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n self.spatial_attn = SpatialSelfAttention(in_channels)\n if use_temporal_attention:\n self.temporal_attn = TemporalTransformer(in_channels)\n\n def forward(self, x, context=None, ref=None):\n x = torch.cat([x, ref], dim=-1)\n x = self.spatial_attn(x)\n x = x[..., :ref.shape[-1]]\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "ResBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "def convert_module_to_f16(x):\ndef convert_module_to_f32(x):\n def __init__(\n self,\n spacial_dim: int,\n embed_dim: int,\n num_heads_channels: int,\n output_dim: int = None,\n ):\n def forward(self, x):\n def forward(self, x, emb):\n def forward(self, x, emb, context=None):\n def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):\n def forward(self, x):\n def __init__(self, channels, out_channels=None, ks=5):\n def forward(self,x):\n def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):\n def forward(self, x):\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, 
x, emb):\n def _forward(self, x, emb):\n def __init__(\n self,\n channels,\n dropout,\n out_channels=None,\n use_conv=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, x):\n def _forward(self, x):\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n ):\n def forward(self, x):\n def _forward(self, x):\ndef count_flops_attn(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n def convert_to_fp16(self):\n def convert_to_fp32(self):\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\nclass AttentionPool2d(nn.Module):\nclass TimestepBlock(nn.Module):\nclass TimestepEmbedSequential(nn.Sequential, TimestepBlock):\nclass Upsample(nn.Module):\nclass TransposedUpsample(nn.Module):\nclass Downsample(nn.Module):\nclass ResBlock(TimestepBlock):\nclass ResBlockNoTime(TimestepBlock):\nclass AttentionBlock(nn.Module):\nclass QKVAttentionLegacy(nn.Module):\nclass QKVAttention(nn.Module):\nclass UNetModel(nn.Module):" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = 
scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. 
/ z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n 
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n\n if self.model.conditioning_key is not None and 
not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', 'txt', 'vision']:\n xc = batch[cond_key]\n xc = rearrange(xc, 'b h w c -> b c h w')\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. / self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n if self.cond_stage_trainable:\n c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] 
tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n \n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support 
dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return 
self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = 
self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = {\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. 
* (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index 
= total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / 
a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
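As a reading aid only (not part of the dataset record), the update performed by p_sample_ddim in the DDIMSampler snippet above can be restated as a standalone step for the eps parameterization. The function name ddim_step and its scalar alpha/sigma arguments are illustrative assumptions; the guidance, temperature and noise-dropout branches are omitted.

import torch

def ddim_step(x_t, eps, alpha_t, alpha_prev, sigma_t=0.0):
    # Current prediction for x_0 (pred_x0 in the snippet).
    pred_x0 = (x_t - (1.0 - alpha_t) ** 0.5 * eps) / alpha_t ** 0.5
    # Direction pointing back to x_t (dir_xt in the snippet).
    dir_xt = (1.0 - alpha_prev - sigma_t ** 2) ** 0.5 * eps
    # With sigma_t = 0 (eta = 0) the step is fully deterministic.
    noise = sigma_t * torch.randn_like(x_t)
    x_prev = alpha_prev ** 0.5 * pred_x0 + dir_xt + noise
    return x_prev, pred_x0

Here alpha_t and alpha_prev play the role of a_t and a_prev in the snippet, i.e. the cumulative alphas gathered at the current and previous DDIM index.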
import einops import torch import torch as th import torch.nn as nn from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer, SpatialTransformerPlus from ldm.modules.diffusionmodules.openaimodel import ResBlock, TimestepEmbedSequential, Downsample, AttentionBlock, Upsample, normalization, checkpoint, convert_module_to_f16, convert_module_to_f32 from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig from omegaconf.listconfig import ListConfig
18,218
self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
" f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
class ReferenceNet(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
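Both truncated code fields above stop right after the attention-head bookkeeping. As a standalone sketch (not code from the repository, with the hypothetical helper name resolve_heads), that arithmetic can be isolated as follows.

def resolve_heads(ch, num_heads=-1, num_head_channels=-1,
                  legacy=True, use_spatial_transformer=True):
    # Mirrors the last lines of the constructors above; at least one of
    # num_heads / num_head_channels must be set (!= -1), as the constructor asserts.
    if num_head_channels == -1:
        dim_head = ch // num_heads
    else:
        num_heads = ch // num_head_channels
        dim_head = num_head_channels
    if legacy:
        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
    return num_heads, dim_head

For example, ch=640 with num_head_channels=64 gives num_heads=10 and dim_head=64 (the legacy branch recomputes 640 // 10 = 64 when a spatial transformer is used).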
if exists(disable_self_attentions):
9
2023-12-16 03:31:33+00:00
24k
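The ground-truth completion for this record is the line `if exists(disable_self_attentions):` shown above. As a reading aid only, here is a sketch of the pattern that line typically opens in ldm-style UNet constructors; the continuation is an assumption (only the exists helper is taken verbatim from the context snippets), and the helper name self_attention_disabled is hypothetical.

def exists(x):
    # Helper reproduced from the ldm.util snippet in the context above.
    return x is not None

def self_attention_disabled(disable_self_attentions, level):
    # disable_self_attentions is documented above as an optional per-level
    # list of booleans (same length as channel_mult); assumed fallback:
    # keep self-attention enabled when the option is not configured.
    if exists(disable_self_attentions):
        return disable_self_attentions[level]
    return False

With disable_self_attentions=None every level keeps self-attention; with e.g. [False, False, True, True] it would be switched off for the two deepest levels.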
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = (\n \"MultiScaleMaskedTransformerDecoder\"\n )\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\n \"res3\",\n \"res4\",\n \"res5\",\n ]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. 
Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # Resizing disabled for Synthia\n cfg.INPUT.RESIZE = CN()\n cfg.INPUT.RESIZE.ENABLED = True\n cfg.INPUT.RESIZE.SIZE_TRAIN = (1280, 720)\n\n # Saving Pseudo Labels during test time\n cfg.MODEL.SAVE_PSEUDO_LABELS = False\n\n # for the Dataset repeat factor\n # cfg.DATASETS.TRAIN_REPEAT_FACTOR = [(\"sd_v99\",5.0), (\"cityscapes_train\",1.0)]" }, { "identifier": "add_clouds_config", "path": "clouds/config.py", "snippet": "def add_clouds_config(cfg):\n # CLOUDS model config\n cfg.MODEL.CLOUDS = CN()\n cfg.MODEL.CLOUDS.CLIP_MODEL_NAME = \"convnext_large_d_320\"\n cfg.MODEL.CLOUDS.CLIP_PRETRAINED_WEIGHTS = \"laion2b_s29b_b131k_ft_soup\"\n cfg.MODEL.CLOUDS.EMBED_DIM = 768\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_ALPHA = 0.4\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_BETA = 0.8\n cfg.MODEL.CLOUDS.ENSEMBLE_ON_VALID_MASK = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_EMA = False\n cfg.MODEL.CLOUDS.SAM = CN()\n cfg.MODEL.CLOUDS.SAM.ENABLED = False\n cfg.MODEL.CLOUDS.SAM.MOBILE = True\n cfg.MODEL.CLOUDS.SAM.MINIBATCH = False\n cfg.MODEL.CLOUDS.SAM.SIZE_THRESHOLD = 5000\n cfg.MODEL.CLOUDS.SAM.EROSION = False\n cfg.MODEL.CLOUDS.SAM.EROSION_SIZE = 3\n cfg.MODEL.CLOUDS.SAM.NUM_POINTS = 5\n cfg.MODEL.CLOUDS.SAM.SELECTION_MODE = \"random\"\n cfg.MODEL.CLOUDS.SAM.RM_INTERSECTION = True\n cfg.MODEL.CLOUDS.SAM.REFINEMENT = False\n cfg.MODEL.CLOUDS.SAM.ALPHA_EMA = 0.999\n cfg.MODEL.CLOUDS.OVERWRITING = True\n cfg.MODEL.CLOUDS.ITERATION_UPDATE = 100" }, { "identifier": "add_wandb_config", "path": "clouds/config.py", "snippet": "def add_wandb_config(cfg):\n # Wandb\n cfg.WANDB = CN()\n cfg.WANDB.PROJECT = \"clouds\"\n cfg.WANDB.NAME = None\n # use flash attention\n cfg.MODEL.FLASH = False" }, { "identifier": "add_prerocessing_training_set_config", "path": "clouds/config.py", "snippet": "def add_prerocessing_training_set_config(cfg):\n cfg.INPUT.FLIP = True\n cfg.INPUT.INITIAL_HEIGHT = 1052\n cfg.INPUT.INITIAL_WIDTH = 1914\n cfg.INPUT.RESIZE_HEIGHT = 720\n cfg.INPUT.RESIZE_WIDTH = 1280\n cfg.INPUT.PL_THRESHOLD = 0.0\n\n cfg.DATASETS.SOURCE_FACTOR = 1.0\n cfg.DATASETS.TARGET_FACTOR = 1.0" }, { "identifier": "add_repeat_factors", "path": "clouds/config.py", "snippet": "def add_repeat_factors(cfg):\n # for the Dataset repeat factor\n if (\n len(cfg.DATASETS.TRAIN) == 2\n and cfg.DATALOADER.SAMPLER_TRAIN == \"WeightedTrainingSampler\"\n ):\n if \"sd\" in cfg.DATASETS.TRAIN[0]:\n target_dataset = cfg.DATASETS.TRAIN[0]\n source_dataset = cfg.DATASETS.TRAIN[1]\n else:\n target_dataset = cfg.DATASETS.TRAIN[1]\n source_dataset = cfg.DATASETS.TRAIN[0]\n\n TRAIN_REPEAT_FACTOR = [\n (target_dataset, cfg.DATASETS.TARGET_FACTOR),\n (source_dataset, cfg.DATASETS.SOURCE_FACTOR),\n ]\n cfg.DATASETS.TRAIN_REPEAT_FACTOR = TRAIN_REPEAT_FACTOR\n return cfg\n else:\n return cfg" }, { "identifier": "MapperTrain", "path": "clouds/data/dataset_mappers/mapper_train.py", "snippet": "class MapperTrain:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations_src,\n augmentations_sd,\n augmentations_photo,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens_src = augmentations_src\n self.tfm_gens_sd = augmentations_sd\n self.tfm_gens_photometric = augmentations_photo\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(\n f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations_src}\"\n )\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n augs_src = []\n augs_sd = []\n augs_photometric = []\n # Build augmentation\n if cfg.INPUT.RESIZE.ENABLED:\n augs_src.append(\n T.ResizeScale(\n min_scale=0.5,\n max_scale=2.0,\n target_height=cfg.INPUT.INITIAL_HEIGHT,\n target_width=cfg.INPUT.INITIAL_WIDTH,\n interp=Image.BILINEAR,\n )\n )\n if cfg.INPUT.CROP.ENABLED:\n augs_src.append(\n T.FixedSizeCrop(\n (768, 768),\n pad=True,\n seg_pad_value=255,\n pad_value=0,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs_src.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs_photometric.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n if cfg.INPUT.FLIP:\n augs_src.append(T.RandomFlip())\n augs_sd.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations_src\": augs_src,\n \"augmentations_sd\": augs_sd,\n \"augmentations_photo\": augs_photometric,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert (\n self.is_train\n ), \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\n \"double\"\n )\n else:\n sem_seg_gt = np.full(\n (dataset_dict[\"height\"], dataset_dict[\"width\"]), self.ignore_label\n ).astype(\"double\")\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n if not (\"generated\" in str(dataset_dict[\"image_id\"])):\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_src, aug_input)\n 
image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n else:\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_sd, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n aug_input_photo, transforms = T.apply_transform_gens(\n self.tfm_gens_photometric, aug_input\n )\n image_aug = aug_input_photo.image\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = torch.as_tensor(\n np.ascontiguousarray(image_aug.transpose(2, 0, 1))\n )\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = F.pad(image_aug, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(\n sem_seg_gt, padding_size, value=self.ignore_label\n ).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n dataset_dict[\"image_aug\"] = image_aug\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\n \"Semantic segmentation dataset should not have 'annotations'.\"\n )\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros(\n (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])\n )\n else:\n masks = BitMasks(\n torch.stack(\n [\n torch.from_numpy(np.ascontiguousarray(x.copy()))\n for x in masks\n ]\n )\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MapperTest", "path": "clouds/data/dataset_mappers/mapper_test.py", "snippet": "class MapperTest:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. 
Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n # if recompute_boxes:\n # assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = augmentations\n self.image_format = image_format\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = [T.ResizeShortestEdge(short_edge_length=[1024], sample_style=\"choice\")]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n\n\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transformation = T.apply_transform_gens(self.augmentations, aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n dataset_dict['height'] = dataset_dict[\"image\"].shape[1]\n dataset_dict['width'] = dataset_dict[\"image\"].shape[2]\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n return dataset_dict" }, { "identifier": "CityscapesSemSegEvaluator", "path": "clouds/evaluation/cityscapes_evaluation.py", "snippet": "class CityscapesSemSegEvaluator(CityscapesEvaluator):\n \"\"\"\n Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.\n\n Note:\n * It does not work in multi-machine distributed training.\n * It contains a synchronization, therefore has to be used on all ranks.\n * Only the main process runs evaluation.\n \"\"\"\n\n def process(self, inputs, outputs):\n from cityscapesscripts.helpers.labels import trainId2label\n for input, output in zip(inputs, outputs):\n file_name = input[\"file_name\"]\n basename = os.path.splitext(os.path.basename(file_name))[0]\n pred_filename = os.path.join(self._temp_dir, basename + 
\"_pred.png\")\n\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device).numpy()\n pred = 255 * np.ones(output.shape, dtype=np.uint8)\n for train_id, label in trainId2label.items():\n if label.ignoreInEval:\n continue\n pred[output == train_id] = label.id\n Image.fromarray(pred).save(pred_filename)\n\n\n def evaluate(self):\n comm.synchronize()\n if comm.get_rank() > 0:\n return\n # Load the Cityscapes eval script *after* setting the required env var,\n # since the script reads CITYSCAPES_DATASET into global variables at load time.\n import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval\n\n self._logger.info(\"Evaluating results under {} ...\".format(self._temp_dir))\n\n # set some global states in cityscapes evaluation API, before evaluating\n cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)\n cityscapes_eval.args.predictionWalk = None\n cityscapes_eval.args.JSONOutput = False\n cityscapes_eval.args.colorized = False\n\n # These lines are adopted from\n # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa\n gt_dir = PathManager.get_local_path(self._metadata.gt_dir)\n groundTruthImgList = glob.glob(\n os.path.join(gt_dir, \"*\", \"*_gtFine_labelIds.png\")\n )\n assert len(\n groundTruthImgList\n ), \"Cannot find any ground truth images to use for evaluation. Searched for: {}\".format(\n cityscapes_eval.args.groundTruthSearch\n )\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(\n cityscapes_eval.getPrediction(cityscapes_eval.args, gt)\n )\n results = cityscapes_eval.evaluateImgLists(\n predictionImgList, groundTruthImgList, cityscapes_eval.args\n )\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": 100.0 * results[\"averageScoreClasses\"],\n \"IoU.road\": 100.0 * results[\"classScores\"][\"road\"],\n \"IoU.sidewalk\": 100.0 * results[\"classScores\"][\"sidewalk\"],\n \"IoU.building\": 100.0 * results[\"classScores\"][\"building\"],\n \"IoU.wall\": 100.0 * results[\"classScores\"][\"wall\"],\n \"IoU.fence\": 100.0 * results[\"classScores\"][\"fence\"],\n \"IoU.pole\": 100.0 * results[\"classScores\"][\"pole\"],\n \"IoU.traffic light\": 100.0 * results[\"classScores\"][\"traffic light\"],\n \"IoU.traffic sign\": 100.0 * results[\"classScores\"][\"traffic sign\"],\n \"IoU.vegetation\": 100.0 * results[\"classScores\"][\"vegetation\"],\n \"IoU.terrain\": 100.0 * results[\"classScores\"][\"terrain\"],\n \"IoU.sky\": 100.0 * results[\"classScores\"][\"sky\"],\n \"IoU.person\": 100.0 * results[\"classScores\"][\"person\"],\n \"IoU.rider\": 100.0 * results[\"classScores\"][\"rider\"],\n \"IoU.car\": 100.0 * results[\"classScores\"][\"car\"],\n \"IoU.truck\": 100.0 * results[\"classScores\"][\"truck\"],\n \"IoU.bus\": 100.0 * results[\"classScores\"][\"bus\"],\n \"IoU.train\": 100.0 * results[\"classScores\"][\"train\"],\n \"IoU.motorcycle\": 100.0 * results[\"classScores\"][\"motorcycle\"],\n \"IoU.bicycle\": 100.0 * results[\"classScores\"][\"bicycle\"],\n }\n if not self._save_pl:\n self._working_dir.cleanup()\n return ret" }, { "identifier": "ClassicalSemSegEvaluator", "path": "clouds/evaluation/semantic_evaluation.py", "snippet": "class ClassicalSemSegEvaluator(DatasetEvaluator):\n \"\"\"\n Evaluate semantic segmentation metrics.\n \"\"\"\n\n def __init__(\n self,\n dataset_name,\n distributed=True,\n output_dir=None,\n *,\n sem_seg_loading_fn=load_image_into_numpy_array,\n num_classes=None,\n ignore_label=None,\n 
save_pl=False,\n ):\n \"\"\"\n Args:\n dataset_name (str): name of the dataset to be evaluated.\n distributed (bool): if True, will collect results from all ranks for evaluation.\n Otherwise, will evaluate the results in the current process.\n output_dir (str): an output directory to dump results.\n sem_seg_loading_fn: function to read sem seg file and load into numpy array.\n Default provided, but projects can customize.\n num_classes, ignore_label: deprecated argument\n \"\"\"\n self._logger = logging.getLogger(__name__)\n if num_classes is not None:\n self._logger.warn(\n \"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata.\"\n )\n if ignore_label is not None:\n self._logger.warn(\n \"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata.\"\n )\n self._dataset_name = dataset_name\n self._distributed = distributed\n self._output_dir = output_dir\n\n self._cpu_device = torch.device(\"cpu\")\n\n self.input_file_to_gt_file = {\n dataset_record[\"file_name\"]: dataset_record[\"sem_seg_file_name\"]\n for dataset_record in DatasetCatalog.get(dataset_name)\n }\n\n meta = MetadataCatalog.get(dataset_name)\n # Dict that maps contiguous training ids to COCO category ids\n try:\n c2d = meta.stuff_dataset_id_to_contiguous_id\n self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}\n except AttributeError:\n self._contiguous_id_to_dataset_id = None\n self._class_names = meta.stuff_classes\n self.sem_seg_loading_fn = sem_seg_loading_fn\n self._num_classes = len(meta.stuff_classes)\n if num_classes is not None:\n assert (\n self._num_classes == num_classes\n ), f\"{self._num_classes} != {num_classes}\"\n self._ignore_label = (\n ignore_label if ignore_label is not None else meta.ignore_label\n )\n\n # This is because cv2.erode did not work for int datatype. Only works for uint8.\n self._compute_boundary_iou = True\n if not _CV2_IMPORTED:\n self._compute_boundary_iou = False\n self._logger.warn(\n \"\"\"Boundary IoU calculation requires OpenCV. B-IoU metrics are\n not going to be computed because OpenCV is not available to import.\"\"\"\n )\n if self._num_classes >= np.iinfo(np.uint8).max:\n self._compute_boundary_iou = False\n self._logger.warn(\n f\"\"\"SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation!\n B-IoU metrics are not going to be computed. Max allowed value (exclusive)\n for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.\n The number of classes of dataset {self._dataset_name} is {self._num_classes}\"\"\"\n )\n self._save_pl = save_pl\n\n def reset(self):\n self._conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._b_conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._predictions = []\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a model.\n It is a list of dicts. Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\".\n outputs: the outputs of a model. 
It is either list of semantic segmentation predictions\n (Tensor [H, W]) or list of dicts with key \"sem_seg\" that contains semantic\n segmentation prediction in the same format.\n \"\"\"\n for input, output in zip(inputs, outputs):\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device)\n pred = np.array(output, dtype=int)\n gt = input[\"sem_seg\"].numpy()\n\n gt[gt == self._ignore_label] = self._num_classes\n\n self._conf_matrix += np.bincount(\n (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._compute_boundary_iou:\n b_gt = self._mask_to_boundary(gt.astype(np.uint8))\n b_pred = self._mask_to_boundary(pred.astype(np.uint8))\n\n self._b_conf_matrix += np.bincount(\n (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._save_pl:\n self._predictions.extend(\n [dict(file_name=input[\"file_name\"], pred=pred)]\n )\n else:\n self._predictions.extend(\n self.encode_json_sem_seg(pred, input[\"file_name\"])\n )\n\n def evaluate(self):\n \"\"\"\n Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):\n\n * Mean intersection-over-union averaged across classes (mIoU)\n * Frequency Weighted IoU (fwIoU)\n * Mean pixel accuracy averaged across classes (mACC)\n * Pixel Accuracy (pACC)\n \"\"\"\n if self._distributed:\n synchronize()\n conf_matrix_list = all_gather(self._conf_matrix)\n b_conf_matrix_list = all_gather(self._b_conf_matrix)\n self._predictions = all_gather(self._predictions)\n self._predictions = list(itertools.chain(*self._predictions))\n if not is_main_process():\n return\n\n self._conf_matrix = np.zeros_like(self._conf_matrix)\n for conf_matrix in conf_matrix_list:\n self._conf_matrix += conf_matrix\n\n self._b_conf_matrix = np.zeros_like(self._b_conf_matrix)\n for b_conf_matrix in b_conf_matrix_list:\n self._b_conf_matrix += b_conf_matrix\n\n if self._output_dir:\n first_elem = self._predictions[0]\n if \"bdd\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"bdd_eval_pl\")\n elif \"mapillary\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"mapillary_eval_pl\")\n PathManager.mkdirs(self._output_dir)\n if self._save_pl:\n # A function that will iterate over the list of dictionnaries and write the corresponding image\n # in the output directory\n def write_image_from_dict(dict):\n filename = os.path.join(\n self._output_dir,\n dict[\"file_name\"].split(\"/\")[-1].split(\".\")[0] + \"_pred.png\",\n )\n pred = dict[\"pred\"]\n pred = get_rgb_from_semantic_map_maxed(pred)\n # pred = Image.fromarray(pred)\n pred.save(filename)\n\n # We apply the function to the list of dictionnaries\n list(map(write_image_from_dict, self._predictions))\n\n else:\n file_path = os.path.join(self._output_dir, \"sem_seg_predictions.json\")\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(self._predictions))\n\n acc = np.full(self._num_classes, np.nan, dtype=float)\n iou = np.full(self._num_classes, np.nan, dtype=float)\n tp = self._conf_matrix.diagonal()[:-1].astype(float)\n pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)\n class_weights = pos_gt / np.sum(pos_gt)\n pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)\n acc_valid = pos_gt > 0\n acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]\n union = pos_gt + pos_pred - tp\n iou_valid = 
np.logical_and(acc_valid, union > 0)\n iou[iou_valid] = tp[iou_valid] / union[iou_valid]\n macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)\n miou = np.sum(iou[iou_valid]) / np.sum(iou_valid)\n fiou = np.sum(iou[iou_valid] * class_weights[iou_valid])\n pacc = np.sum(tp) / np.sum(pos_gt)\n\n if self._compute_boundary_iou:\n b_iou = np.full(self._num_classes, np.nan, dtype=float)\n b_tp = self._b_conf_matrix.diagonal()[:-1].astype(float)\n b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(float)\n b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(float)\n b_union = b_pos_gt + b_pos_pred - b_tp\n b_iou_valid = b_union > 0\n b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid]\n\n res = {}\n res[\"mIoU\"] = 100 * miou\n res[\"fwIoU\"] = 100 * fiou\n for i, name in enumerate(self._class_names):\n res[f\"IoU-{name}\"] = 100 * iou[i]\n if self._compute_boundary_iou:\n res[f\"BoundaryIoU-{name}\"] = 100 * b_iou[i]\n res[f\"min(IoU, B-Iou)-{name}\"] = 100 * min(iou[i], b_iou[i])\n res[\"mACC\"] = 100 * macc\n res[\"pACC\"] = 100 * pacc\n for i, name in enumerate(self._class_names):\n res[f\"ACC-{name}\"] = 100 * acc[i]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"sem_seg_evaluation.pth\")\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(res, f)\n results = OrderedDict({\"sem_seg\": res})\n self._logger.info(results)\n\n def get_miou_value_from_dict(dict, subkey):\n for key, value in dict.items():\n if subkey in key and \"IoU\" in key:\n if np.isnan(value):\n return 0\n else:\n return value\n\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": results[\"sem_seg\"][\"mIoU\"],\n \"IoU.road\": get_miou_value_from_dict(results[\"sem_seg\"], \"road\"),\n \"IoU.sidewalk\": get_miou_value_from_dict(results[\"sem_seg\"], \"sidewalk\"),\n \"IoU.building\": get_miou_value_from_dict(results[\"sem_seg\"], \"building\"),\n \"IoU.wall\": get_miou_value_from_dict(results[\"sem_seg\"], \"wall\"),\n \"IoU.fence\": get_miou_value_from_dict(results[\"sem_seg\"], \"fence\"),\n \"IoU.pole\": get_miou_value_from_dict(results[\"sem_seg\"], \"pole\"),\n \"IoU.traffic light\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic light\"\n ),\n \"IoU.traffic sign\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic sign\"\n ),\n \"IoU.vegetation\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"vegetation\"\n ),\n \"IoU.terrain\": get_miou_value_from_dict(results[\"sem_seg\"], \"terrain\"),\n \"IoU.sky\": get_miou_value_from_dict(results[\"sem_seg\"], \"sky\"),\n \"IoU.person\": get_miou_value_from_dict(results[\"sem_seg\"], \"person\"),\n \"IoU.rider\": get_miou_value_from_dict(results[\"sem_seg\"], \"rider\"),\n \"IoU.car\": get_miou_value_from_dict(results[\"sem_seg\"], \"car\"),\n \"IoU.truck\": get_miou_value_from_dict(results[\"sem_seg\"], \"truck\"),\n \"IoU.bus\": get_miou_value_from_dict(results[\"sem_seg\"], \"bus\"),\n \"IoU.train\": get_miou_value_from_dict(results[\"sem_seg\"], \"train\"),\n \"IoU.motorcycle\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"motorcycle\"\n ),\n \"IoU.bicycle\": get_miou_value_from_dict(results[\"sem_seg\"], \"bicycle\"),\n }\n return ret\n\n def encode_json_sem_seg(self, sem_seg, input_file_name):\n \"\"\"\n Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.\n See http://cocodataset.org/#format-results\n \"\"\"\n json_list = []\n for label in np.unique(sem_seg):\n if self._contiguous_id_to_dataset_id is not None:\n assert (\n label in 
self._contiguous_id_to_dataset_id\n ), \"Label {} is not in the metadata info for {}\".format(\n label, self._dataset_name\n )\n dataset_id = self._contiguous_id_to_dataset_id[label]\n else:\n dataset_id = int(label)\n mask = (sem_seg == label).astype(np.uint8)\n mask_rle = mask_util.encode(np.array(mask[:, :, None], order=\"F\"))[0]\n mask_rle[\"counts\"] = mask_rle[\"counts\"].decode(\"utf-8\")\n json_list.append(\n {\n \"file_name\": input_file_name,\n \"category_id\": dataset_id,\n \"segmentation\": mask_rle,\n }\n )\n return json_list\n\n def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02):\n assert mask.ndim == 2, \"mask_to_boundary expects a 2-dimensional image\"\n h, w = mask.shape\n diag_len = np.sqrt(h ** 2 + w ** 2)\n dilation = max(1, int(round(dilation_ratio * diag_len)))\n kernel = np.ones((3, 3), dtype=np.uint8)\n\n padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)\n eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation)\n eroded_mask = eroded_mask_with_padding[1:-1, 1:-1]\n boundary = mask - eroded_mask\n return boundary" }, { "identifier": "PersoEvalHook", "path": "clouds/engine/hooks.py", "snippet": "class PersoEvalHook(HookBase):\n \"\"\"\n Run an evaluation function periodically, and at the end of training.\n\n It is executed every ``eval_period`` iterations and after the last iteration.\n \"\"\"\n\n def __init__(self, eval_period, eval_function, eval_after_train=True):\n \"\"\"\n Args:\n eval_period (int): the period to run `eval_function`. Set to 0 to\n not evaluate periodically (but still evaluate after the last iteration\n if `eval_after_train` is True).\n eval_function (callable): a function which takes no arguments, and\n returns a nested dict of evaluation metrics.\n eval_after_train (bool): whether to evaluate after the last iteration\n\n Note:\n This hook must be enabled in all or none workers.\n If you would like only certain workers to perform evaluation,\n give other workers a no-op function (`eval_function=lambda: None`).\n \"\"\"\n self._period = eval_period\n self._func = eval_function\n self._eval_after_train = eval_after_train\n\n def _do_eval(self):\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. \"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)\n\n # Evaluation may take different time among workers.\n # A barrier make them start the next iteration together.\n comm.synchronize()\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n if \"debug\" in self.trainer.cfg.OUTPUT_DIR:\n pass\n else:\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. 
\"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(\n **flattened_results, smoothing_hint=False\n )\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n if self._period > 0 and next_iter % self._period == 0:\n # do the last eval in after_train\n if next_iter != self.trainer.max_iter:\n self._do_eval()\n\n def after_train(self):\n # This condition is to prevent the eval from running after a failed training\n if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:\n self._do_eval()\n # func is likely a closure that holds reference to the trainer\n # therefore we clean it to avoid circular reference in the end\n del self._func" }, { "identifier": "WandbWriter", "path": "clouds/utils/events.py", "snippet": "class WandbWriter(EventWriter):\n \"\"\"\n Write all scalars to a tensorboard file.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Args:\n log_dir (str): the directory to save the output events\n kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._last_write = -1\n self._group_rules = [\n (IsIn(\"/\"), BaseRule()),\n (IsIn(\"loss\"), Prefix(\"train\")),\n # (IsIn(\"sem_seg\"), Prefix(\"val\")),\n (\n IsInList([\"lr\", \"time\", \"eta_seconds\", \"rank_data_time\", \"data_time\"]),\n Prefix(\"stats\"),\n ),\n ]\n\n def write(self):\n storage = get_event_storage()\n\n def _group_name(scalar_name):\n for rule, op in self._group_rules:\n if rule(scalar_name):\n return op(scalar_name)\n return scalar_name\n\n stats = {\n _group_name(name): scalars[0]\n for name, scalars in storage.latest().items()\n if scalars[1] > self._last_write\n }\n if len(stats) > 0:\n self._last_write = max([v[1] for k, v in storage.latest().items()])\n\n # storage.put_{image,histogram} is only meant to be used by\n # tensorboard writer. So we access its internal fields directly from here.\n if len(storage._vis_data) >= 1:\n stats[\"image\"] = [\n wandb.Image(img, caption=img_name)\n for img_name, img, step_num in storage._vis_data\n ]\n # Storage stores all image data and rely on this writer to clear them.\n # As a result it assumes only one writer will use its image data.\n # An alternative design is to let storage store limited recent\n # data (e.g. 
only the most recent image) that all writers can access.\n # In that case a writer may not see all image data if its period is long.\n storage.clear_images()\n\n if len(storage._histograms) >= 1:\n\n def create_bar(tag, bucket_limits, bucket_counts, **kwargs):\n data = [\n [label, val] for (label, val) in zip(bucket_limits, bucket_counts)\n ]\n table = wandb.Table(data=data, columns=[\"label\", \"value\"])\n return wandb.plot.bar(table, \"label\", \"value\", title=tag)\n\n stats[\"hist\"] = [create_bar(**params) for params in storage._histograms]\n\n storage.clear_histograms()\n\n if len(stats) == 0:\n return\n wandb.log(stats, step=storage.iter)\n\n def close(self):\n wandb.finish()" }, { "identifier": "setup_wandb", "path": "clouds/utils/events.py", "snippet": "def setup_wandb(cfg, args):\n if comm.is_main_process():\n init_args = {\n k.lower(): v\n for k, v in cfg.WANDB.items()\n if isinstance(k, str) and k not in [\"config\", \"name\"]\n }\n if \"config_exclude_keys\" in init_args:\n init_args[\"config\"] = cfg\n init_args[\"config\"][\"cfg_file\"] = args.config_file\n else:\n init_args[\"config\"] = {\n \"output_dir\": cfg.OUTPUT_DIR,\n \"train\": extract_dataset_from_string(cfg.DATASETS.TRAIN),\n \"test\": extract_dataset_from_string(cfg.DATASETS.TEST),\n \"iter\": cfg.SOLVER.MAX_ITER,\n \"lr\": cfg.SOLVER.BASE_LR,\n \"batch_size\": cfg.SOLVER.IMS_PER_BATCH,\n \"cfg_file\": args.config_file,\n }\n\n init_args[\"group\"] = get_base_name(cfg)\n if cfg.WANDB.NAME is not None:\n init_args[\"name\"] = cfg.WANDB.NAME\n else:\n init_args[\"name\"] = get_full_name_xp(init_args[\"group\"], cfg)\n if \"debug\" in cfg.OUTPUT_DIR:\n init_args[\"project\"] = \"debug\"\n wandb.init(**init_args)" } ]
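The ClassicalSemSegEvaluator snippet above accumulates a (C+1) x (C+1) confusion matrix (the extra row/column absorbs the ignore label) and derives mIoU from its diagonal and marginals. A minimal NumPy sketch of that arithmetic — the function name and the toy matrix are illustrative, not part of the repo:

import numpy as np

def miou_from_confusion(conf):
    # conf[pred, gt] holds pixel counts; the last row/column is the ignore label,
    # mirroring how the evaluator builds its matrix with np.bincount.
    tp = conf.diagonal()[:-1].astype(float)              # per-class true positives
    pos_gt = conf[:-1, :-1].sum(axis=0).astype(float)    # ground-truth pixels per class
    pos_pred = conf[:-1, :-1].sum(axis=1).astype(float)  # predicted pixels per class
    union = pos_gt + pos_pred - tp
    valid = (pos_gt > 0) & (union > 0)
    return 100.0 * (tp[valid] / union[valid]).mean()

# toy 2-class matrix (class 0, class 1, ignore)
conf = np.array([[8, 2, 0],
                 [1, 9, 0],
                 [0, 0, 0]])
print(round(miou_from_confusion(conf), 2))  # ~73.86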
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
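Among these imports, add_repeat_factors (its definition appears in the context list above) only attaches repeat factors when exactly two training datasets are paired with the WeightedTrainingSampler, treating the dataset whose name contains "sd" as the generated target. A plain-Python sketch of that selection logic, with hypothetical dataset names and factors:

def repeat_factors(train_datasets, target_factor, source_factor):
    # Mirrors the branch in add_repeat_factors without detectron2:
    # the dataset whose name contains "sd" is taken as the (generated) target.
    assert len(train_datasets) == 2
    if "sd" in train_datasets[0]:
        target, source = train_datasets
    else:
        source, target = train_datasets
    return [(target, target_factor), (source, source_factor)]

# hypothetical names; the factors play the role of DATASETS.TARGET_FACTOR / SOURCE_FACTOR
print(repeat_factors(["sd_generated_city", "cityscapes_train"], 5.0, 1.0))
# [('sd_generated_city', 5.0), ('cityscapes_train', 1.0)]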
14542
@classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg)
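The setup() helper in this cropped code builds its configuration by stacking the add_*_config helpers onto a fresh detectron2 CfgNode. A minimal sketch of that composition, assuming the clouds package from this record is importable; the printed default comes from the add_clouds_config snippet above:

from detectron2.config import get_cfg
from detectron2.projects.deeplab import add_deeplab_config
from clouds import add_maskformer2_config, add_clouds_config, add_wandb_config

cfg = get_cfg()
add_deeplab_config(cfg)       # poly LR schedule options
add_maskformer2_config(cfg)   # MaskFormer2 / Swin / point-sampling keys
add_clouds_config(cfg)        # CLOUDS keys: CLIP backbone, SAM options, EMA update period
add_wandb_config(cfg)         # WANDB.PROJECT / WANDB.NAME and MODEL.FLASH
print(cfg.MODEL.CLOUDS.CLIP_MODEL_NAME)  # "convnext_large_d_320" by default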
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): mapper = MapperTest(cfg, False) return build_detection_test_loader( cfg, dataset_name, batch_size=1, mapper=mapper ) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if cfg.MODEL.CLOUDS.OVERWRITING: if any( ignored_module in module_name for ignored_module in ["sem_seg_head_ema.", "sam.sam."] ): continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = ( hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER ) if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain( *[x["params"] for x in self.param_groups] ) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg)
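build_optimizer above optionally wraps the chosen optimizer class so a full-model gradient-norm clip runs before every step() when CLIP_GRADIENTS.CLIP_TYPE is "full_model". A standalone sketch of that wrapper pattern; the helper name and the toy linear model are illustrative:

import itertools
import torch

def with_full_model_grad_clip(optim_cls, clip_norm_val):
    # Subclass the optimizer so step() first clips the global grad norm across
    # all parameter groups, as FullModelGradientClippingOptimizer does above.
    class FullModelGradClipOptimizer(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
            return super().step(closure=closure)
    return FullModelGradClipOptimizer

# toy usage
model = torch.nn.Linear(4, 2)
opt = with_full_model_grad_clip(torch.optim.AdamW, clip_norm_val=0.01)(
    model.parameters(), lr=1e-4)
model(torch.randn(8, 4)).sum().backward()
opt.step()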
add_clouds_config(cfg)
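That next line, add_clouds_config(cfg), continues the same setup() stack. A plausible continuation of the function, assuming the usual detectron2 pattern of merging the YAML file and CLI overrides before freezing — a hedged sketch, not the repo's verbatim code:

from detectron2.engine import default_setup
from clouds import add_clouds_config, add_wandb_config

def finish_setup_sketch(cfg, args):
    # hypothetical continuation mirroring common detectron2 training scripts
    add_clouds_config(cfg)                 # the recorded next line
    add_wandb_config(cfg)
    cfg.merge_from_file(args.config_file)  # YAML on top of the defaults
    cfg.merge_from_list(args.opts)         # command-line overrides
    cfg.freeze()
    default_setup(cfg, args)
    return cfg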
1
2023-12-15 15:40:58+00:00
24k
modelscope/scepter
scepter/modules/model/network/ldm/ldm_sce.py
[ { "identifier": "ANNOTATORS", "path": "scepter/modules/annotator/registry.py", "snippet": "ANNOTATORS = Registry('ANNOTATORS', build_func=build_annotator)" }, { "identifier": "MODELS", "path": "scepter/modules/model/registry.py", "snippet": "MODELS = Registry('MODELS', build_func=build_model)" }, { "identifier": "TUNERS", "path": "scepter/modules/model/registry.py", "snippet": "TUNERS = Registry('TUNERS', build_func=build_model)" }, { "identifier": "Config", "path": "scepter/modules/utils/config.py", "snippet": "class Config(object):\n def __init__(self,\n cfg_dict={},\n load=True,\n cfg_file=None,\n logger=None,\n parser_ins=None):\n '''\n support to parse json/dict/yaml_file of parameters.\n :param load: whether load parameters or not.\n :param cfg_dict: default None.\n :param cfg_level: default None, means the current cfg-level for recurrent cfg presentation.\n :param logger: logger instance for print the cfg log.\n one examples:\n import argparse\n parser = argparse.ArgumentParser(\n description=\"Argparser for Cate process:\\n\"\n )\n parser.add_argument(\n \"--stage\",\n dest=\"stage\",\n help=\"Running stage!\",\n default=\"train\",\n choices=[\"train\"]\n )\n\n cfg = Config(load=True, parser_ins=parser)\n '''\n # checking that the logger exists or not\n if logger is None:\n self.logger = StdMsg(name='Config')\n else:\n self.logger = logger\n self.cfg_dict = cfg_dict\n if load:\n if cfg_file is None:\n assert parser_ins is not None\n self.args = _parse_args(parser_ins)\n self.load_from_file(self.args.cfg_file)\n # os.environ[\"LAUNCHER\"] = self.args.launcher\n os.environ['DATA_ONLINE'] = str(self.args.data_online).lower()\n os.environ['SHARE_STORAGE'] = str(\n self.args.share_storage).lower()\n os.environ['ES_DEBUG'] = str(self.args.debug).lower()\n else:\n self.load_from_file(cfg_file)\n if 'ENV' not in self.cfg_dict:\n self.cfg_dict['ENV'] = {\n 'SEED': 2023,\n 'USE_PL': False,\n 'BACKEND': 'nccl',\n 'SYNC_BN': False,\n 'CUDNN_DETERMINISTIC': True,\n 'CUDNN_BENCHMARK': False\n }\n self.logger.info(\n f\"ENV is not set and will use default ENV as {self.cfg_dict['ENV']}; \"\n f'If want to change this value, please set them in your config.'\n )\n else:\n if 'SEED' not in self.cfg_dict['ENV']:\n self.cfg_dict['ENV']['SEED'] = 2023\n self.logger.info(\n f\"SEED is not set and will use default SEED as {self.cfg_dict['ENV']['SEED']}; \"\n f'If want to change this value, please set it in your config.'\n )\n os.environ['ES_SEED'] = str(self.cfg_dict['ENV']['SEED'])\n self._update_dict(self.cfg_dict)\n if load:\n self.logger.info(f'Parse cfg file as \\n {self.dump()}')\n\n def load_from_file(self, file_name):\n self.logger.info(f'Loading config from {file_name}')\n if file_name is None or not os.path.exists(file_name):\n self.logger.info(f'File {file_name} does not exist!')\n self.logger.warning(\n f\"Cfg file is None or doesn't exist, Skip loading config from {file_name}.\"\n )\n return\n if file_name.endswith('.json'):\n self.cfg_dict = self._load_json(file_name)\n self.logger.info(\n f'System take {file_name} as json, because we find json in this file'\n )\n elif file_name.endswith('.yaml'):\n self.cfg_dict = self._load_yaml(file_name)\n self.logger.info(\n f'System take {file_name} as yaml, because we find yaml in this file'\n )\n else:\n self.logger.info(\n f'No config file found! 
Because we do not find json or yaml in --cfg {file_name}'\n )\n\n def _update_dict(self, cfg_dict):\n def recur(key, elem):\n if type(elem) is dict:\n return key, Config(load=False,\n cfg_dict=elem,\n logger=self.logger)\n elif type(elem) is list:\n config_list = []\n for idx, ele in enumerate(elem):\n if type(ele) is str and ele[1:3] == 'e-':\n ele = float(ele)\n config_list.append(ele)\n elif type(ele) is str:\n config_list.append(ele)\n elif type(ele) is dict:\n config_list.append(\n Config(load=False,\n cfg_dict=ele,\n logger=self.logger))\n elif type(ele) is list:\n config_list.append(ele)\n else:\n config_list.append(ele)\n return key, config_list\n else:\n if type(elem) is str and elem[1:3] == 'e-':\n elem = float(elem)\n return key, elem\n\n dic = dict(recur(k, v) for k, v in cfg_dict.items())\n self.__dict__.update(dic)\n\n def _load_json(self, cfg_file):\n '''\n :param cfg_file:\n :return:\n '''\n if cfg_file is None:\n self.logger.warning(\n f'Cfg file is None, Skip loading config from {cfg_file}.')\n return {}\n file_name = cfg_file\n try:\n cfg = json.load(open(file_name, 'r'))\n except Exception as e:\n self.logger.error(f'Load json from {cfg_file} error. Message: {e}')\n sys.exit()\n return cfg\n\n def _load_yaml(self, cfg_file):\n '''\n if replace some parameters from Base, You can reference the base parameters use Base.\n\n :param cfg_file:\n :return:\n '''\n if cfg_file is None:\n self.logger.warning(\n f'Cfg file is None, Skip loading config from {cfg_file}.')\n return {}\n file_name = cfg_file\n try:\n with open(cfg_file, 'r') as f:\n cfg = yaml.load(f.read(), Loader=yaml.SafeLoader)\n except Exception as e:\n self.logger.error(f'Load yaml from {cfg_file} error. Message: {e}')\n sys.exit()\n if '_BASE_RUN' not in cfg.keys() and '_BASE_MODEL' not in cfg.keys(\n ) and '_BASE' not in cfg.keys():\n return cfg\n\n if '_BASE' in cfg.keys():\n if cfg['_BASE'][1] == '.':\n prev_count = cfg['_BASE'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(-1 - cfg['_BASE'].count('..'))] +\n cfg['_BASE'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base, cfg)\n else:\n if '_BASE_RUN' in cfg.keys():\n if cfg['_BASE_RUN'][1] == '.':\n prev_count = cfg['_BASE_RUN'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(-1 - prev_count)] +\n cfg['_BASE_RUN'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE_RUN'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base,\n cfg,\n preserve_base=True)\n if '_BASE_MODEL' in cfg.keys():\n if cfg['_BASE_MODEL'][1] == '.':\n prev_count = cfg['_BASE_MODEL'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(\n -1 - cfg['_BASE_MODEL'].count('..'))] +\n cfg['_BASE_MODEL'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE_MODEL'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base, cfg)\n return cfg\n\n def _path_join(self, path_list):\n path = ''\n for p in path_list:\n path += p + '/'\n return path[:-1]\n\n def items(self):\n return self.cfg_dict.items()\n\n def _merge_cfg_from_base(self, cfg_base, cfg, preserve_base=False):\n for k, v in cfg.items():\n if k in cfg_base.keys():\n if isinstance(v, dict):\n 
self._merge_cfg_from_base(cfg_base[k], v)\n else:\n cfg_base[k] = v\n else:\n if 'BASE' not in k or preserve_base:\n cfg_base[k] = v\n return cfg_base\n\n def _merge_cfg_from_command(self, args, cfg):\n assert len(\n args.opts\n ) % 2 == 0, f'Override list {args.opts} has odd length: {len(args.opts)}'\n\n keys = args.opts[0::2]\n vals = args.opts[1::2]\n\n # maximum supported depth 3\n for idx, key in enumerate(keys):\n key_split = key.split('.')\n assert len(\n key_split\n ) <= 4, 'Key depth error. \\n Maximum depth: 3\\n Get depth: {}'.format(\n len(key_split))\n assert key_split[0] in cfg.keys(), 'Non-existant key: {}.'.format(\n key_split[0])\n if len(key_split) == 2:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n elif len(key_split) == 3:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[2] in cfg[key_split[0]][\n key_split[1]].keys(), 'Non-existant key: {}'.format(key)\n elif len(key_split) == 4:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[2] in cfg[key_split[0]][\n key_split[1]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[3] in cfg[key_split[0]][key_split[1]][\n key_split[2]].keys(), 'Non-existant key: {}'.format(key)\n\n if len(key_split) == 1:\n cfg[key_split[0]] = vals[idx]\n elif len(key_split) == 2:\n cfg[key_split[0]][key_split[1]] = vals[idx]\n elif len(key_split) == 3:\n cfg[key_split[0]][key_split[1]][key_split[2]] = vals[idx]\n elif len(key_split) == 4:\n cfg[key_split[0]][key_split[1]][key_split[2]][\n key_split[3]] = vals[idx]\n\n return cfg\n\n def __repr__(self):\n return '{}\\n'.format(self.dump())\n\n def dump(self):\n return json.dumps(self.cfg_dict, indent=2)\n\n def deep_copy(self):\n return copy.deepcopy(self)\n\n def have(self, name):\n if name in self.__dict__:\n return True\n return False\n\n def get(self, name, default=None):\n if name in self.__dict__:\n return self.__dict__[name]\n return default\n\n def __getitem__(self, key):\n return self.__dict__.__getitem__(key)\n\n def __setattr__(self, key, value):\n super().__setattr__(key, value)\n if hasattr(self, 'cfg_dict') and key in self.cfg_dict:\n if isinstance(value, Config):\n value = value.cfg_dict\n self.cfg_dict[key] = value\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n self.__setattr__(key, value)\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def set(self, name, value):\n new_dict = {name: value}\n self.__dict__.update(new_dict)\n self.__setattr__(name, value)\n\n def get_dict(self):\n return self.cfg_dict\n\n def get_lowercase_dict(self, cfg_dict=None):\n if cfg_dict is None:\n cfg_dict = self.get_dict()\n config_new = {}\n for key, val in cfg_dict.items():\n if isinstance(key, str):\n if isinstance(val, dict):\n config_new[key.lower()] = self.get_lowercase_dict(val)\n else:\n config_new[key.lower()] = val\n else:\n config_new[key] = val\n return config_new\n\n @staticmethod\n def get_plain_cfg(cfg=None):\n if isinstance(cfg, Config):\n cfg_new = {}\n cfg_dict = cfg.get_dict()\n for key, val in cfg_dict.items():\n if isinstance(val, (Config, dict, list)):\n cfg_new[key] = Config.get_plain_cfg(val)\n elif isinstance(val, (str, numbers.Number)):\n cfg_new[key] = val\n return cfg_new\n elif isinstance(cfg, dict):\n cfg_new = {}\n cfg_dict = cfg\n for key, val in cfg_dict.items():\n if isinstance(val, (Config, dict, list)):\n cfg_new[key] = Config.get_plain_cfg(val)\n elif isinstance(val, 
(str, numbers.Number)):\n cfg_new[key] = val\n return cfg_new\n elif isinstance(cfg, list):\n cfg_new = []\n cfg_list = cfg\n for val in cfg_list:\n if isinstance(val, (Config, dict, list)):\n cfg_new.append(Config.get_plain_cfg(val))\n elif isinstance(val, (str, numbers.Number)):\n cfg_new.append(val)\n return cfg_new\n else:\n return cfg" }, { "identifier": "dict_to_yaml", "path": "scepter/modules/utils/config.py", "snippet": "def dict_to_yaml(module_name, name, json_config, set_name=False):\n '''\n { \"ENV\" :\n { \"description\" : \"\",\n \"A\" : {\n \"value\": 1.0,\n \"description\": \"\"\n }\n }\n }\n convert std dict to yaml\n :param module_name:\n :param json_config:\n :return:\n '''\n def convert_yaml_style(level=1,\n name='ENV',\n description='ENV PARA',\n default='',\n type_name='',\n is_sys=False):\n new_line = ''\n new_line += '{}# {} DESCRIPTION: {} TYPE: {} default: {}\\n'.format(\n '\\t' * (level - 1), name.upper(), description, type_name,\n f'\\'{default}\\'' if isinstance(default, str) else default)\n if is_sys:\n if name == '-':\n new_line += '{}{}\\n'.format('\\t' * (level - 1), name.upper())\n else:\n new_line += '{}{}:\\n'.format('\\t' * (level - 1), name.upper())\n else:\n # if isinstance(default, str):\n # default = f'\\'{default}\\''\n if default is None:\n new_line += '{}# {}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n else:\n new_line += '{}{}: {}\\n'.format('\\t' * (level - 1),\n name.upper(), default)\n return new_line\n\n def parse_dict(json_config,\n level_num,\n parent_key,\n set_name=False,\n name='',\n parent_type='dict'):\n yaml_str = ''\n # print(level_num, json_config)\n if isinstance(json_config, dict):\n if 'value' in json_config:\n value = json_config['value']\n if isinstance(value, dict):\n assert len(value) < 1\n value = None\n description = json_config.get('description', '')\n yaml_str += convert_yaml_style(level=level_num - 1,\n name=parent_key,\n description=description,\n default=value,\n type_name=type(value).__name__)\n return True, yaml_str\n else:\n if len(json_config) < 1:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default='',\n type_name='')\n level_num += 1\n for k, v in json_config.items():\n if k == 'description':\n continue\n if isinstance(v, dict):\n is_final, new_yaml_str = parse_dict(v,\n level_num,\n k,\n parent_type='dict')\n if not is_final and parent_type == 'dict':\n description = v.get('description', '')\n yaml_str += convert_yaml_style(\n level=level_num - 1,\n name=k,\n description=description,\n default='',\n type_name='',\n is_sys=True)\n if not is_final and parent_type == 'list':\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=k,\n type_name='')\n yaml_str += new_yaml_str\n elif isinstance(v, list):\n base_yaml_str = convert_yaml_style(level=level_num - 1,\n name=k,\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += base_yaml_str\n for tup in v:\n is_final, new_yaml_str = parse_dict(\n tup, level_num, '-', parent_type='list')\n if not is_final:\n yaml_str += convert_yaml_style(level=level_num,\n name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n yaml_str += new_yaml_str\n else:\n raise KeyError(\n f'json config {json_config} must be a dict of list'\n )\n\n elif isinstance(json_config, list):\n level_num += 1\n for tup in json_config:\n is_final, new_yaml_str = parse_dict(tup, level_num, '-')\n if not is_final:\n\n yaml_str += convert_yaml_style(level=level_num - 1,\n 
name='-',\n description='',\n default='',\n type_name='',\n is_sys=True)\n if set_name:\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n raise KeyError(f'json config {json_config} must be a dict')\n return False, yaml_str\n\n if isinstance(json_config, dict):\n first_dict, sec_dict, third_dict = {}, {}, {}\n for key, value in json_config.items():\n if isinstance(value, dict) and len(value) > 0:\n first_dict[key] = value\n elif isinstance(value, dict) and len(value) == 0:\n sec_dict[key] = value\n elif isinstance(value, list):\n third_dict[key] = value\n else:\n raise f'Config {json_config} is illegal'\n json_config = {}\n json_config.update(first_dict)\n json_config.update(sec_dict)\n json_config.update(third_dict)\n\n yaml_str = f'[{module_name}] module yaml examples:\\n'\n level_num = 1\n base_yaml_str = convert_yaml_style(level=level_num,\n name=module_name,\n description='',\n default='',\n type_name='',\n is_sys=True)\n level_num += 1\n\n is_final, new_yaml_str = parse_dict(json_config,\n level_num,\n module_name,\n set_name=isinstance(json_config, list)\n and set_name,\n name=name)\n if not is_final:\n yaml_str += base_yaml_str\n if set_name and not isinstance(json_config, list):\n yaml_str += convert_yaml_style(level=level_num,\n name='NAME',\n description='',\n default=name,\n type_name='')\n yaml_str += new_yaml_str\n else:\n yaml_str += new_yaml_str[1:]\n\n return yaml_str" }, { "identifier": "LatentDiffusion", "path": "scepter/modules/model/network/ldm/ldm.py", "snippet": "class LatentDiffusion(TrainModule):\n para_dict = {\n 'PARAMETERIZATION': {\n 'value':\n 'v',\n 'description':\n \"The prediction type, you can choose from 'eps' and 'x0' and 'v'\",\n },\n 'TIMESTEPS': {\n 'value': 1000,\n 'description': 'The schedule steps for diffusion.',\n },\n 'SCHEDULE_ARGS': {},\n 'MIN_SNR_GAMMA': {\n 'value': None,\n 'description': 'The minimum snr gamma, default is None.',\n },\n 'ZERO_TERMINAL_SNR': {\n 'value': False,\n 'description': 'Whether zero terminal snr, default is False.',\n },\n 'PRETRAINED_MODEL': {\n 'value': None,\n 'description': \"Whole model's pretrained model path.\",\n },\n 'IGNORE_KEYS': {\n 'value': [],\n 'description': 'The ignore keys for pretrain model loaded.',\n },\n 'SCALE_FACTOR': {\n 'value': 0.18215,\n 'description': 'The vae embeding scale.',\n },\n 'SIZE_FACTOR': {\n 'value': 8,\n 'description': 'The vae size factor.',\n },\n 'DEFAULT_N_PROMPT': {\n 'value': '',\n 'description': 'The default negtive prompt.',\n },\n 'TRAIN_N_PROMPT': {\n 'value': '',\n 'description': 'The negtive prompt used in train phase.',\n },\n 'P_ZERO': {\n 'value': 0.0,\n 'description': 'The prob for zero or negtive prompt.',\n },\n 'USE_EMA': {\n 'value': True,\n 'description': 'Use Ema or not. 
Default True',\n },\n 'DIFFUSION_MODEL': {},\n 'DIFFUSION_MODEL_EMA': {},\n 'FIRST_STAGE_MODEL': {},\n 'COND_STAGE_MODEL': {},\n 'TOKENIZER': {}\n }\n\n def __init__(self, cfg, logger=None):\n super().__init__(cfg, logger=logger)\n self.init_params()\n self.construct_network()\n\n def init_params(self):\n self.parameterization = self.cfg.get('PARAMETERIZATION', 'eps')\n assert self.parameterization in [\n 'eps', 'x0', 'v'\n ], 'currently only supporting \"eps\" and \"x0\" and \"v\"'\n self.num_timesteps = self.cfg.get('TIMESTEPS', 1000)\n\n self.schedule_args = {\n k.lower(): v\n for k, v in self.cfg.get('SCHEDULE_ARGS', {\n 'NAME': 'logsnr_cosine_interp',\n 'SCALE_MIN': 2.0,\n 'SCALE_MAX': 4.0\n }).items()\n }\n\n self.min_snr_gamma = self.cfg.get('MIN_SNR_GAMMA', None)\n\n self.zero_terminal_snr = self.cfg.get('ZERO_TERMINAL_SNR', False)\n if self.zero_terminal_snr:\n assert self.parameterization == 'v', 'Now zero_terminal_snr only support v-prediction mode.'\n\n self.sigmas = noise_schedule(schedule=self.schedule_args.pop('name'),\n n=self.num_timesteps,\n zero_terminal_snr=self.zero_terminal_snr,\n **self.schedule_args)\n\n self.diffusion = GaussianDiffusion(\n sigmas=self.sigmas, prediction_type=self.parameterization)\n\n self.pretrained_model = self.cfg.get('PRETRAINED_MODEL', None)\n self.ignore_keys = self.cfg.get('IGNORE_KEYS', [])\n\n self.model_config = self.cfg.DIFFUSION_MODEL\n self.first_stage_config = self.cfg.FIRST_STAGE_MODEL\n self.cond_stage_config = self.cfg.COND_STAGE_MODEL\n self.tokenizer_config = self.cfg.get('TOKENIZER', None)\n self.loss_config = self.cfg.get('LOSS', None)\n\n self.scale_factor = self.cfg.get('SCALE_FACTOR', 0.18215)\n self.size_factor = self.cfg.get('SIZE_FACTOR', 8)\n self.default_n_prompt = self.cfg.get('DEFAULT_N_PROMPT', '')\n self.default_n_prompt = '' if self.default_n_prompt is None else self.default_n_prompt\n self.p_zero = self.cfg.get('P_ZERO', 0.0)\n self.train_n_prompt = self.cfg.get('TRAIN_N_PROMPT', '')\n if self.default_n_prompt is None:\n self.default_n_prompt = ''\n if self.train_n_prompt is None:\n self.train_n_prompt = ''\n self.use_ema = self.cfg.get('USE_EMA', True)\n self.model_ema_config = self.cfg.get('DIFFUSION_MODEL_EMA', None)\n\n def construct_network(self):\n self.model = BACKBONES.build(self.model_config, logger=self.logger)\n self.logger.info('all parameters:{}'.format(count_params(self.model)))\n if self.use_ema and self.model_ema_config:\n self.model_ema = BACKBONES.build(self.model_ema_config,\n logger=self.logger)\n self.model_ema = self.model_ema.eval()\n for param in self.model_ema.parameters():\n param.requires_grad = False\n if self.loss_config:\n self.loss = LOSSES.build(self.loss_config, logger=self.logger)\n if self.tokenizer_config is not None:\n self.tokenizer = TOKENIZERS.build(self.tokenizer_config,\n logger=self.logger)\n\n self.first_stage_model = MODELS.build(self.first_stage_config,\n logger=self.logger)\n self.first_stage_model = self.first_stage_model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n if self.tokenizer_config is not None:\n self.cond_stage_config.KWARGS = {\n 'vocab_size': self.tokenizer.vocab_size\n }\n if self.cond_stage_config == '__is_unconditional__':\n print(\n f'Training {self.__class__.__name__} as an unconditional model.'\n )\n self.cond_stage_model = None\n else:\n model = EMBEDDERS.build(self.cond_stage_config, logger=self.logger)\n self.cond_stage_model = 
model.eval().requires_grad_(False)\n self.cond_stage_model.train = disabled_train\n\n def load_pretrained_model(self, pretrained_model):\n if pretrained_model is not None:\n with FS.get_from(pretrained_model,\n wait_finish=True) as local_model:\n self.init_from_ckpt(local_model, ignore_keys=self.ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n if path.endswith('safetensors'):\n from safetensors.torch import load_file as load_safetensors\n sd = load_safetensors(path)\n else:\n sd = torch.load(path, map_location='cpu')\n new_sd = OrderedDict()\n for k, v in sd.items():\n ignored = False\n for ik in ignore_keys:\n if ik in k:\n if we.rank == 0:\n self.logger.info(\n 'Ignore key {} from state_dict.'.format(k))\n ignored = True\n break\n if not ignored:\n if k.startswith('model.diffusion_model.'):\n k = k.replace('model.diffusion_model.', 'model.')\n k = k.replace('post_quant_conv',\n 'conv2') if 'post_quant_conv' in k else k\n k = k.replace('quant_conv',\n 'conv1') if 'quant_conv' in k else k\n new_sd[k] = v\n\n missing, unexpected = self.load_state_dict(new_sd, strict=False)\n if we.rank == 0:\n self.logger.info(\n f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys'\n )\n if len(missing) > 0:\n self.logger.info(f'Missing Keys:\\n {missing}')\n if len(unexpected) > 0:\n self.logger.info(f'\\nUnexpected Keys:\\n {unexpected}')\n\n def encode_condition(self, input, method='encode_text'):\n if hasattr(self.cond_stage_model, method):\n return getattr(self.cond_stage_model,\n method)(input, tokenizer=self.tokenizer)\n else:\n return self.cond_stage_model(input)\n\n def forward_train(self, image=None, noise=None, prompt=None, **kwargs):\n\n x_start = self.encode_first_stage(image, **kwargs)\n t = torch.randint(0,\n self.num_timesteps, (x_start.shape[0], ),\n device=x_start.device).long()\n context = {}\n if prompt and self.cond_stage_model:\n zeros = (torch.rand(len(prompt)) < self.p_zero).numpy().tolist()\n prompt = [\n self.train_n_prompt if zeros[idx] else p\n for idx, p in enumerate(prompt)\n ]\n self.register_probe({'after_prompt': prompt})\n with torch.autocast(device_type='cuda', enabled=False):\n context = self.encode_condition(\n self.tokenizer(prompt).to(we.device_id))\n if 'hint' in kwargs and kwargs['hint'] is not None:\n hint = kwargs.pop('hint')\n if isinstance(context, dict):\n context['hint'] = hint\n else:\n context = {'crossattn': context, 'hint': hint}\n else:\n hint = None\n if self.min_snr_gamma is not None:\n alphas = self.diffusion.alphas.to(we.device_id)[t]\n sigmas = self.diffusion.sigmas.pow(2).to(we.device_id)[t]\n snrs = (alphas / sigmas).clamp(min=1e-20)\n min_snrs = snrs.clamp(max=self.min_snr_gamma)\n weights = min_snrs / snrs\n else:\n weights = 1\n self.register_probe({'snrs_weights': weights})\n\n loss = self.diffusion.loss(x0=x_start,\n t=t,\n model=self.model,\n model_kwargs={'cond': context},\n noise=noise,\n **kwargs)\n loss = loss * weights\n loss = loss.mean()\n ret = {'loss': loss, 'probe_data': {'prompt': prompt}}\n return ret\n\n def noise_sample(self, batch_size, h, w, g):\n noise = torch.empty(batch_size, 4, h, w,\n device=we.device_id).normal_(generator=g)\n return noise\n\n def forward(self, **kwargs):\n if self.training:\n return self.forward_train(**kwargs)\n else:\n return self.forward_test(**kwargs)\n\n @torch.no_grad()\n @torch.autocast('cuda', dtype=torch.float16)\n def forward_test(self,\n prompt=None,\n n_prompt=None,\n sampler='ddim',\n sample_steps=50,\n seed=2023,\n 
guide_scale=7.5,\n guide_rescale=0.5,\n discretization='trailing',\n run_train_n=True,\n **kwargs):\n g = torch.Generator(device=we.device_id)\n seed = seed if seed >= 0 else random.randint(0, 2**32 - 1)\n g.manual_seed(seed)\n num_samples = len(prompt)\n if 'dynamic_encode_text' in kwargs and kwargs.pop(\n 'dynamic_encode_text'):\n method = 'dynamic_encode_text'\n else:\n method = 'encode_text'\n\n n_prompt = default(n_prompt, [self.default_n_prompt] * len(prompt))\n assert isinstance(prompt, list) and \\\n isinstance(n_prompt, list) and \\\n len(prompt) == len(n_prompt)\n # with torch.autocast(device_type=\"cuda\", enabled=False):\n context = self.encode_condition(self.tokenizer(prompt).to(\n we.device_id),\n method=method)\n null_context = self.encode_condition(self.tokenizer(n_prompt).to(\n we.device_id),\n method=method)\n if 'hint' in kwargs and kwargs['hint'] is not None:\n hint = kwargs.pop('hint')\n if isinstance(context, dict):\n context['hint'] = hint\n else:\n context = {'crossattn': context, 'hint': hint}\n if isinstance(null_context, dict):\n null_context['hint'] = hint\n else:\n null_context = {'crossattn': null_context, 'hint': hint}\n else:\n hint = None\n if 'index' in kwargs:\n kwargs.pop('index')\n image_size = None\n if 'meta' in kwargs:\n meta = kwargs.pop('meta')\n if 'image_size' in meta:\n h = int(meta['image_size'][0][0])\n w = int(meta['image_size'][1][0])\n image_size = [h, w]\n if 'image_size' in kwargs:\n image_size = kwargs.pop('image_size')\n if isinstance(image_size, numbers.Number):\n image_size = [image_size, image_size]\n if image_size is None:\n image_size = [1024, 1024]\n height, width = image_size\n noise = self.noise_sample(num_samples, height // self.size_factor,\n width // self.size_factor, g)\n # UNet use input n_prompt\n samples = self.diffusion.sample(solver=sampler,\n noise=noise,\n model=self.model,\n model_kwargs=[{\n 'cond': context\n }, {\n 'cond': null_context\n }],\n steps=sample_steps,\n guide_scale=guide_scale,\n guide_rescale=guide_rescale,\n discretization=discretization,\n show_progress=True,\n seed=seed,\n condition_fn=None,\n clamp=None,\n percentile=None,\n t_max=None,\n t_min=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n **kwargs)\n x_samples = self.decode_first_stage(samples).float()\n x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)\n # UNet use train n_prompt\n if not self.default_n_prompt == self.train_n_prompt and run_train_n:\n train_n_prompt = [self.train_n_prompt] * len(prompt)\n null_train_context = self.encode_condition(\n self.tokenizer(train_n_prompt).to(we.device_id), method=method)\n\n tn_samples = self.diffusion.sample(solver=sampler,\n noise=noise,\n model=self.model,\n model_kwargs=[{\n 'cond': context\n }, {\n 'cond':\n null_train_context\n }],\n steps=sample_steps,\n guide_scale=guide_scale,\n guide_rescale=guide_rescale,\n discretization=discretization,\n show_progress=we.rank == 0,\n seed=seed,\n condition_fn=None,\n clamp=None,\n percentile=None,\n t_max=None,\n t_min=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n **kwargs)\n\n t_x_samples = self.decode_first_stage(tn_samples).float()\n\n t_x_samples = torch.clamp((t_x_samples + 1.0) / 2.0,\n min=0.0,\n max=1.0)\n else:\n train_n_prompt = ['' for _ in prompt]\n t_x_samples = [None for _ in prompt]\n\n outputs = list()\n for i, (p, np, tnp, img, t_img) in enumerate(\n zip(prompt, n_prompt, train_n_prompt, x_samples, t_x_samples)):\n one_tup = {'prompt': p, 'n_prompt': np, 'image': img}\n if hint 
is not None:\n one_tup.update({'hint': hint[i]})\n if t_img is not None:\n one_tup['train_n_prompt'] = tnp\n one_tup['train_n_image'] = t_img\n outputs.append(one_tup)\n\n return outputs\n\n @torch.no_grad()\n def log_images(self, image=None, prompt=None, n_prompt=None, **kwargs):\n results = self.forward_test(prompt=prompt, n_prompt=n_prompt, **kwargs)\n outputs = list()\n for img, res in zip(image, results):\n one_tup = {\n 'orig': torch.clamp((img + 1.0) / 2.0, min=0.0, max=1.0),\n 'recon': res['image'],\n 'prompt': res['prompt'],\n 'n_prompt': res['n_prompt']\n }\n if 'hint' in res:\n one_tup.update({'hint': res['hint']})\n if 'train_n_prompt' in res:\n one_tup['train_n_prompt'] = res['train_n_prompt']\n one_tup['train_n_image'] = res['train_n_image']\n outputs.append(one_tup)\n return outputs\n\n @torch.no_grad()\n def encode_first_stage(self, x, **kwargs):\n z = self.first_stage_model.encode(x)\n return self.scale_factor * z\n\n @torch.no_grad()\n def decode_first_stage(self, z):\n z = 1. / self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @staticmethod\n def get_config_template():\n return dict_to_yaml('MODEL',\n __class__.__name__,\n LatentDiffusion.para_dict,\n set_name=True)" }, { "identifier": "LatentDiffusionXL", "path": "scepter/modules/model/network/ldm/ldm_xl.py", "snippet": "class LatentDiffusionXL(LatentDiffusion):\n para_dict = {\n 'LOAD_REFINER': {\n 'value': False,\n 'description': 'Whether load REFINER or Not.'\n }\n }\n\n def __init__(self, cfg, logger=None):\n super().__init__(cfg, logger=logger)\n self.load_refiner = cfg.get('LOAD_REFINER', False)\n self.latent_cache_data = {}\n self.SURPPORT_RATIOS = {\n '0.5': (704, 1408),\n '0.52': (704, 1344),\n '0.57': (768, 1344),\n '0.6': (768, 1280),\n '0.68': (832, 1216),\n '0.72': (832, 1152),\n '0.78': (896, 1152),\n '0.82': (896, 1088),\n '0.88': (960, 1088),\n '0.94': (960, 1024),\n '1.0': (1024, 1024),\n '1.07': (1024, 960),\n '1.13': (1088, 960),\n '1.21': (1088, 896),\n '1.29': (1152, 896),\n '1.38': (1152, 832),\n '1.46': (1216, 832),\n '1.67': (1280, 768),\n '1.75': (1344, 768),\n '1.91': (1344, 704),\n '2.0': (1408, 704),\n '2.09': (1472, 704),\n '2.4': (1536, 640),\n '2.5': (1600, 640),\n '2.89': (1664, 576),\n '3.0': (1728, 576),\n }\n\n def construct_network(self):\n super().construct_network()\n self.refiner_cfg = self.cfg.get('REFINER_MODEL', None)\n self.refiner_cond_cfg = self.cfg.get('REFINER_COND_MODEL', None)\n if self.refiner_cfg and self.load_refiner:\n self.refiner_model = BACKBONES.build(self.refiner_cfg,\n logger=self.logger)\n self.refiner_cond_model = BACKBONES.build(self.refiner_cond_cfg,\n logger=self.logger)\n else:\n self.refiner_model = None\n self.refiner_cond_model = None\n self.input_keys = self.get_unique_embedder_keys_from_conditioner(\n self.cond_stage_model)\n if self.refiner_cond_model:\n self.input_refiner_keys = self.get_unique_embedder_keys_from_conditioner(\n self.refiner_cond_model)\n else:\n self.input_refiner_keys = []\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n if path.endswith('safetensors'):\n from safetensors.torch import load_file as load_safetensors\n sd = load_safetensors(path)\n else:\n sd = torch.load(path, map_location='cpu')\n new_sd = OrderedDict()\n for k, v in sd.items():\n ignored = False\n for ik in ignore_keys:\n if ik in k:\n if we.rank == 0:\n self.logger.info(\n 'Ignore key {} from state_dict.'.format(k))\n ignored = True\n break\n if not ignored:\n if k.startswith('model.diffusion_model.'):\n k = 
k.replace('model.diffusion_model.', 'model.')\n if k.startswith('conditioner.'):\n k = k.replace('conditioner.', 'cond_stage_model.')\n k = k.replace('post_quant_conv',\n 'conv2') if 'post_quant_conv' in k else k\n k = k.replace('quant_conv',\n 'conv1') if 'quant_conv' in k else k\n new_sd[k] = v\n\n missing, unexpected = self.load_state_dict(new_sd, strict=False)\n if we.rank == 0:\n self.logger.info(\n f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys'\n )\n if len(missing) > 0:\n self.logger.info(f'Missing Keys:\\n {missing}')\n if len(unexpected) > 0:\n self.logger.info(f'\\nUnexpected Keys:\\n {unexpected}')\n\n def get_unique_embedder_keys_from_conditioner(self, conditioner):\n input_keys = []\n for x in conditioner.embedders:\n input_keys.extend(x.input_keys)\n return list(set(input_keys))\n\n def get_batch(self, keys, value_dict, num_samples=1):\n batch = {}\n batch_uc = {}\n N = num_samples\n device = we.device_id\n for key in keys:\n if key == 'prompt':\n batch['prompt'] = value_dict['prompt']\n batch_uc['prompt'] = value_dict['negative_prompt']\n elif key == 'original_size_as_tuple':\n batch['original_size_as_tuple'] = (torch.tensor(\n value_dict['original_size_as_tuple']).to(device).repeat(\n N, 1))\n elif key == 'crop_coords_top_left':\n batch['crop_coords_top_left'] = (torch.tensor(\n value_dict['crop_coords_top_left']).to(device).repeat(\n N, 1))\n elif key == 'aesthetic_score':\n batch['aesthetic_score'] = (torch.tensor(\n [value_dict['aesthetic_score']]).to(device).repeat(N, 1))\n batch_uc['aesthetic_score'] = (torch.tensor([\n value_dict['negative_aesthetic_score']\n ]).to(device).repeat(N, 1))\n\n elif key == 'target_size_as_tuple':\n batch['target_size_as_tuple'] = (torch.tensor(\n value_dict['target_size_as_tuple']).to(device).repeat(\n N, 1))\n else:\n batch[key] = value_dict[key]\n\n for key in batch.keys():\n if key not in batch_uc and isinstance(batch[key], torch.Tensor):\n batch_uc[key] = torch.clone(batch[key])\n return batch, batch_uc\n\n def forward_train(self, image=None, noise=None, prompt=None, **kwargs):\n with torch.autocast('cuda', enabled=False):\n x_start = self.encode_first_stage(image, **kwargs)\n\n t = torch.randint(0,\n self.num_timesteps, (x_start.shape[0], ),\n device=x_start.device).long()\n\n if prompt and self.cond_stage_model:\n zeros = (torch.rand(len(prompt)) < self.p_zero).numpy().tolist()\n prompt = [\n self.train_n_prompt if zeros[idx] else p\n for idx, p in enumerate(prompt)\n ]\n self.register_probe({'after_prompt': prompt})\n batch = {'prompt': prompt}\n for key in self.input_keys:\n if key not in kwargs:\n continue\n batch[key] = kwargs[key].to(we.device_id)\n context = getattr(self.cond_stage_model, 'encode')(batch)\n if 'hint' in kwargs and kwargs['hint'] is not None:\n hint = kwargs.pop('hint')\n if isinstance(context, dict):\n context['hint'] = hint\n else:\n context = {'crossattn': context, 'hint': hint}\n else:\n hint = None\n if self.min_snr_gamma is not None:\n alphas = self.diffusion.alphas.to(we.device_id)[t]\n sigmas = self.diffusion.sigmas.pow(2).to(we.device_id)[t]\n snrs = (alphas / sigmas).clamp(min=1e-20)\n min_snrs = snrs.clamp(max=self.min_snr_gamma)\n weights = min_snrs / snrs\n else:\n weights = 1\n self.register_probe({'snrs_weights': weights})\n loss = self.diffusion.loss(x0=x_start,\n t=t,\n model=self.model,\n model_kwargs={'cond': context},\n noise=noise,\n **kwargs)\n loss = loss * weights\n loss = loss.mean()\n ret = {'loss': loss, 'probe_data': {'prompt': prompt}}\n 
return ret\n\n def check_valid_inputs(self, kwargs):\n batch_data = {}\n all_keys = set(self.input_keys + self.input_refiner_keys)\n for key in all_keys:\n if key in kwargs:\n batch_data[key] = kwargs.pop(key)\n return batch_data\n\n @torch.no_grad()\n def forward_test(self,\n prompt=None,\n n_prompt=None,\n image=None,\n sampler='ddim',\n sample_steps=50,\n seed=2023,\n guide_scale=7.5,\n guide_rescale=0.5,\n discretization='trailing',\n img_to_img_strength=0.0,\n run_train_n=True,\n refine_strength=0.0,\n refine_sampler='ddim',\n **kwargs):\n g = torch.Generator(device=we.device_id)\n seed = seed if seed >= 0 else random.randint(0, 2**32 - 1)\n g.manual_seed(seed)\n num_samples = len(prompt)\n n_prompt = default(n_prompt, [self.default_n_prompt] * len(prompt))\n assert isinstance(prompt, list) and \\\n isinstance(n_prompt, list) and \\\n len(prompt) == len(n_prompt)\n image_size = None\n if 'meta' in kwargs:\n meta = kwargs.pop('meta')\n if 'image_size' in meta:\n h = int(meta['image_size'][0][0])\n w = int(meta['image_size'][1][0])\n image_size = [h, w]\n if 'image_size' in kwargs:\n image_size = kwargs.pop('image_size')\n if image_size is None or isinstance(image_size, numbers.Number):\n image_size = [1024, 1024]\n pre_batch = self.check_valid_inputs(kwargs)\n if len(pre_batch) > 0:\n batch = {'prompt': prompt}\n batch.update(pre_batch)\n batch_uc = {'prompt': n_prompt}\n batch_uc.update(pre_batch)\n else:\n height, width = image_size\n if image is None:\n ori_width = width\n ori_height = height\n else:\n ori_height, ori_width = image.shape[-2:]\n\n value_dict = {\n 'original_size_as_tuple': [ori_height, ori_width],\n 'target_size_as_tuple': [height, width],\n 'prompt': prompt,\n 'negative_prompt': n_prompt,\n 'crop_coords_top_left': [0, 0]\n }\n if refine_strength > 0:\n assert 'aesthetic_score' in kwargs and 'negative_aesthetic_score' in kwargs\n value_dict['aesthetic_score'] = kwargs.pop('aesthetic_score')\n value_dict['negative_aesthetic_score'] = kwargs.pop(\n 'negative_aesthetic_score')\n\n batch, batch_uc = self.get_batch(self.input_keys,\n value_dict,\n num_samples=num_samples)\n\n context = getattr(self.cond_stage_model, 'encode')(batch)\n null_context = getattr(self.cond_stage_model, 'encode')(batch_uc)\n\n if 'hint' in kwargs and kwargs['hint'] is not None:\n hint = kwargs.pop('hint')\n if isinstance(context, dict):\n context['hint'] = hint\n else:\n context = {'crossattn': context, 'hint': hint}\n if isinstance(null_context, dict):\n null_context['hint'] = hint\n else:\n null_context = {'crossattn': null_context, 'hint': hint}\n else:\n hint = None\n\n if 'index' in kwargs:\n kwargs.pop('index')\n height, width = batch['target_size_as_tuple'][0].cpu().numpy().tolist()\n noise = self.noise_sample(num_samples, height // self.size_factor,\n width // self.size_factor, g)\n if image is not None and img_to_img_strength > 0:\n # run image2image\n if not (ori_width == width and ori_height == height):\n image = F.interpolate(image, (height, width), mode='bicubic')\n with torch.autocast('cuda', enabled=False):\n z = self.encode_first_stage(image, **kwargs)\n else:\n z = None\n\n # UNet use input n_prompt\n samples = self.diffusion.sample(\n noise=noise,\n x=z,\n denoising_strength=img_to_img_strength if z is not None else 1.0,\n refine_strength=refine_strength,\n solver=sampler,\n model=self.model,\n model_kwargs=[{\n 'cond': context\n }, {\n 'cond': null_context\n }],\n steps=sample_steps,\n guide_scale=guide_scale,\n guide_rescale=guide_rescale,\n discretization=discretization,\n 
show_progress=True,\n seed=seed,\n condition_fn=None,\n clamp=None,\n percentile=None,\n t_max=None,\n t_min=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n **kwargs)\n\n # apply refiner\n if refine_strength > 0:\n assert self.refiner_model is not None\n assert self.refiner_cond_model is not None\n with torch.autocast('cuda', enabled=False):\n before_refiner_samples = self.decode_first_stage(\n samples).float()\n before_refiner_samples = torch.clamp(\n (before_refiner_samples + 1.0) / 2.0, min=0.0, max=1.0)\n\n if len(pre_batch) > 0:\n batch = {'prompt': prompt}\n batch.update(pre_batch)\n batch_uc = {'prompt': n_prompt}\n batch_uc.update(pre_batch)\n else:\n batch, batch_uc = self.get_batch(self.input_refiner_keys,\n value_dict,\n num_samples=num_samples)\n\n context = getattr(self.refiner_cond_model, 'encode')(batch)\n null_context = getattr(self.refiner_cond_model, 'encode')(batch_uc)\n\n samples = self.diffusion.sample(\n noise=noise,\n x=samples,\n denoising_strength=img_to_img_strength\n if z is not None else 1.0,\n refine_strength=refine_strength,\n refine_stage=True,\n solver=sampler,\n model=self.refiner_model,\n model_kwargs=[{\n 'cond': context\n }, {\n 'cond': null_context\n }],\n steps=sample_steps,\n guide_scale=guide_scale,\n guide_rescale=guide_rescale,\n discretization=discretization,\n show_progress=True,\n seed=seed,\n condition_fn=None,\n clamp=None,\n percentile=None,\n t_max=None,\n t_min=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n **kwargs)\n else:\n before_refiner_samples = [None for _ in prompt]\n\n with torch.autocast('cuda', enabled=False):\n x_samples = self.decode_first_stage(samples).float()\n x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)\n\n # UNet use train n_prompt\n if not self.default_n_prompt == self.train_n_prompt and run_train_n:\n train_n_prompt = [self.train_n_prompt] * len(prompt)\n if len(pre_batch) > 0:\n pre_batch = {'prompt': prompt}\n batch.update(pre_batch)\n batch_uc = {'prompt': train_n_prompt}\n batch_uc.update(pre_batch)\n else:\n value_dict['negative_prompt'] = train_n_prompt\n batch, batch_uc = self.get_batch(self.input_keys,\n value_dict,\n num_samples=num_samples)\n\n context = getattr(self.cond_stage_model, 'encode')(batch)\n null_context = getattr(self.cond_stage_model, 'encode')(batch_uc)\n\n tn_samples = self.diffusion.sample(\n noise=noise,\n x=z,\n denoising_strength=img_to_img_strength\n if z is not None else 1.0,\n refine_strength=refine_strength,\n solver=sampler,\n model=self.model,\n model_kwargs=[{\n 'cond': context\n }, {\n 'cond': null_context\n }],\n steps=sample_steps,\n guide_scale=guide_scale,\n guide_rescale=guide_rescale,\n discretization=discretization,\n show_progress=we.rank == 0,\n seed=seed,\n condition_fn=None,\n clamp=None,\n percentile=None,\n t_max=None,\n t_min=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n **kwargs)\n\n if refine_strength > 0:\n assert self.refiner_model is not None\n assert self.refiner_cond_model is not None\n with torch.autocast('cuda', enabled=False):\n before_refiner_t_samples = self.decode_first_stage(\n samples).float()\n before_refiner_t_samples = torch.clamp(\n (before_refiner_t_samples + 1.0) / 2.0, min=0.0, max=1.0)\n\n if len(pre_batch) > 0:\n pre_batch = {'prompt': prompt}\n batch.update(pre_batch)\n batch_uc = {'prompt': train_n_prompt}\n batch_uc.update(pre_batch)\n else:\n batch, batch_uc = self.get_batch(self.input_refiner_keys,\n value_dict,\n num_samples=num_samples)\n\n context = 
getattr(self.refiner_cond_model, 'encode')(batch)\n null_context = getattr(self.refiner_cond_model,\n 'encode')(batch_uc)\n tn_samples = self.diffusion.sample(\n noise=noise,\n x=tn_samples,\n denoising_strength=img_to_img_strength\n if z is not None else 1.0,\n refine_strength=refine_strength,\n refine_stage=True,\n solver=sampler,\n model=self.refiner_model,\n model_kwargs=[{\n 'cond': context\n }, {\n 'cond': null_context\n }],\n steps=sample_steps,\n guide_scale=guide_scale,\n guide_rescale=guide_rescale,\n discretization=discretization,\n show_progress=True,\n seed=seed,\n condition_fn=None,\n clamp=None,\n percentile=None,\n t_max=None,\n t_min=None,\n discard_penultimate_step=None,\n return_intermediate=None,\n **kwargs)\n else:\n before_refiner_t_samples = [None for _ in prompt]\n\n t_x_samples = self.decode_first_stage(tn_samples).float()\n\n t_x_samples = torch.clamp((t_x_samples + 1.0) / 2.0,\n min=0.0,\n max=1.0)\n else:\n train_n_prompt = ['' for _ in prompt]\n t_x_samples = [None for _ in prompt]\n before_refiner_t_samples = [None for _ in prompt]\n\n outputs = list()\n for i, (p, np, tnp, img, r_img, t_img, r_t_img) in enumerate(\n zip(prompt, n_prompt, train_n_prompt, x_samples,\n before_refiner_samples, t_x_samples,\n before_refiner_t_samples)):\n one_tup = {\n 'prompt': p,\n 'n_prompt': np,\n 'image': img,\n 'before_refiner_image': r_img\n }\n if hint is not None:\n one_tup.update({'hint': hint[i]})\n if t_img is not None:\n one_tup['train_n_prompt'] = tnp\n one_tup['train_n_image'] = t_img\n one_tup['train_n_before_refiner_image'] = r_t_img\n outputs.append(one_tup)\n\n return outputs\n\n @staticmethod\n def get_config_template():\n return dict_to_yaml('MODEL',\n __class__.__name__,\n LatentDiffusionXL.para_dict,\n set_name=True)" } ]
import copy

import torch
import torch.nn as nn
import torchvision.transforms as TT

from scepter.modules.annotator.registry import ANNOTATORS
from scepter.modules.model.registry import MODELS, TUNERS
from scepter.modules.utils.config import Config, dict_to_yaml

from .ldm import LatentDiffusion
from .ldm_xl import LatentDiffusionXL
14472
# -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
@MODELS.register_class()
class LatentDiffusionSCETuning(LatentDiffusion):
    para_dict = {}
    para_dict.update(LatentDiffusion.para_dict)

    def __init__(self, cfg, logger):
        super().__init__(cfg, logger=logger)

    def init_params(self):
        super().init_params()
        self.tuner_model_config = self.cfg.TUNER_MODEL

    def construct_network(self):
        super().construct_network()
        input_block_channels = self.model._input_block_chans
        sc_tuner_cfg = self.tuner_model_config['SC_TUNER_CFG']
        use_layers = self.tuner_model_config.get('USE_LAYERS', None)
        lsc_tuner_blocks = nn.ModuleList([])
        for i, chan in enumerate(input_block_channels[::-1]):
            if use_layers and i not in use_layers:
                lsc_tuner_blocks.append(nn.Identity())
                continue
            tuner_cfg = copy.deepcopy(sc_tuner_cfg)
            tuner_cfg['DIM'] = chan
            tuner_cfg['TUNER_LENGTH'] = int(chan * tuner_cfg.get('DOWN_RATIO', 1.0))
            sc_tuner = TUNERS.build(tuner_cfg, logger=self.logger)
            lsc_tuner_blocks.append(sc_tuner)
        self.model.lsc_identity = lsc_tuner_blocks

    def save_pretrained(self, *args, destination=None, prefix='', keep_vars=False):
        save_state = {
            key: value
            for key, value in self.state_dict().items()
            if 'lsc_identity' in key
        }
        return save_state

    def save_pretrained_config(self):
        return copy.deepcopy(self.cfg.TUNER_MODEL.cfg_dict)

    @staticmethod
    def get_config_template():
# -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
@MODELS.register_class()
class LatentDiffusionSCETuning(LatentDiffusion):
    para_dict = {}
    para_dict.update(LatentDiffusion.para_dict)

    def __init__(self, cfg, logger):
        super().__init__(cfg, logger=logger)

    def init_params(self):
        super().init_params()
        self.tuner_model_config = self.cfg.TUNER_MODEL

    def construct_network(self):
        super().construct_network()
        input_block_channels = self.model._input_block_chans
        sc_tuner_cfg = self.tuner_model_config['SC_TUNER_CFG']
        use_layers = self.tuner_model_config.get('USE_LAYERS', None)
        lsc_tuner_blocks = nn.ModuleList([])
        for i, chan in enumerate(input_block_channels[::-1]):
            if use_layers and i not in use_layers:
                lsc_tuner_blocks.append(nn.Identity())
                continue
            tuner_cfg = copy.deepcopy(sc_tuner_cfg)
            tuner_cfg['DIM'] = chan
            tuner_cfg['TUNER_LENGTH'] = int(chan * tuner_cfg.get('DOWN_RATIO', 1.0))
            sc_tuner = TUNERS.build(tuner_cfg, logger=self.logger)
            lsc_tuner_blocks.append(sc_tuner)
        self.model.lsc_identity = lsc_tuner_blocks

    def save_pretrained(self, *args, destination=None, prefix='', keep_vars=False):
        save_state = {
            key: value
            for key, value in self.state_dict().items()
            if 'lsc_identity' in key
        }
        return save_state

    def save_pretrained_config(self):
        return copy.deepcopy(self.cfg.TUNER_MODEL.cfg_dict)

    @staticmethod
    def get_config_template():
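The two identical code blocks above are, presumably, this record's cropped_code and all_code fields: the source is cut off right after the `get_config_template` header so that the following next_line field can serve as the prediction target. Inside them, `construct_network` walks the UNet input blocks in reverse channel order and pairs each position with either a width-matched tuner or an `nn.Identity` placeholder. A minimal standalone sketch of that pairing pattern, using hypothetical channel sizes and a plain `nn.Linear` as a stand-in for the SC tuner built via `TUNERS.build`:

# Standalone sketch of the per-block tuner pairing used in construct_network above.
# The channel list, use_layers set, and the nn.Linear stand-in are illustrative
# assumptions; in the real model they come from self.model._input_block_chans,
# the TUNER_MODEL config, and TUNERS.build(tuner_cfg) respectively.
import torch.nn as nn

input_block_channels = [320, 320, 640, 640, 1280, 1280]  # assumed example widths
use_layers = {0, 2, 4}                                    # only tune these positions

blocks = nn.ModuleList()
for i, chan in enumerate(input_block_channels[::-1]):
    if use_layers and i not in use_layers:
        blocks.append(nn.Identity())      # untouched position: pass-through
        continue
    blocks.append(nn.Linear(chan, chan))  # stand-in for the width-matched SC tuner

print([type(m).__name__ for m in blocks])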
return dict_to_yaml('MODELS',
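That next_line target calls `dict_to_yaml`, whose signature, `dict_to_yaml(module_name, name, json_config, set_name=False)`, appears in this record's context snippets: it renders a para_dict-style dictionary of `{'value': ..., 'description': ...}` entries as commented YAML help text. A minimal, hypothetical usage sketch follows; the module name, class name, and parameter entry are illustrative assumptions, not the record's ground-truth completion:

# Hypothetical sketch of rendering a para_dict as YAML help text with dict_to_yaml.
# The signature comes from the context snippet above; the argument values are
# invented for illustration only.
from scepter.modules.utils.config import dict_to_yaml

para_dict = {
    'TUNER_MODEL': {
        'value': None,
        'description': 'Config of the SC tuner injected into the UNet input blocks.',
    },
}

print(dict_to_yaml('MODELS', 'LatentDiffusionSCETuning', para_dict, set_name=True))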
4
2023-12-21 02:01:48+00:00
24k
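This closes one complete record of the corpus: retrieved context snippets, the file's import statement, the code cropped at the completion point, the next line to predict, and an index marking which context entry the target line presumably draws on. A minimal sketch of turning one such record into a prompt/target pair, assuming JSONL storage and the field names seen in these records (the file name and prompt layout are illustrative only):

# Minimal sketch of consuming one record of this corpus.
# Assumptions: records are stored as JSON lines under the field names seen in
# this dump; the prompt layout below is illustrative, not a prescribed format.
import json

def build_example(line: str):
    record = json.loads(line)
    # gold_snippet_index presumably points at the context entry the target uses.
    gold = record["context"][record["gold_snippet_index"]]
    prompt = (
        f"# repo: {record['repo_name']}\n"
        f"# file: {record['file_path']}\n"
        f"# snippet from {gold['path']}:\n"
        f"{gold['snippet']}\n\n"
        f"{record['import_statement']}\n\n"
        f"{record['cropped_code']}\n"
    )
    return prompt, record["next_line"]

with open("records.jsonl") as fh:  # hypothetical file name
    prompt, target = build_example(next(fh))
    print(target)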
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
[ { "identifier": "LayerNorm", "path": "multi_part_assembly/utils/wx_transformer_utilities/layer_norm.py", "snippet": "def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):\n if not export and torch.cuda.is_available() and has_fused_layernorm:\n return FusedLayerNorm(normalized_shape, eps, elementwise_affine)\n return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)" }, { "identifier": "MultiheadAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/multihead_attention.py", "snippet": "class MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n kdim=None,\n vdim=None,\n dropout=0.0,\n bias=True,\n add_bias_kv=False,\n add_zero_attn=False,\n self_attention=False,\n encoder_decoder_attention=False,\n q_noise=0.0,\n qn_block_size=8,\n nblocks=1,\n top_k_ratio=None,\n use_value_competition=True,\n shared_memory_attention = False,\n use_topk = False,\n topk = 3,\n num_steps = 5,\n mem_slots = 4,\n null_attention = False,\n regressive = False\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout_module = FairseqDropout(\n dropout, module_name=self.__class__.__name__\n )\n\n self.head_dim = embed_dim // num_heads\n self.shared_memory_attention = shared_memory_attention\n\n print('total heads', self.num_heads)\n print('head dim', self.head_dim)\n\n self.use_topk = use_topk\n self.topk = topk\n\n print('use topk?' + str(self.use_topk))\n print('topk:'+str(self.topk))\n\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.self_attention = self_attention\n self.encoder_decoder_attention = encoder_decoder_attention\n\n assert not self.self_attention or self.qkv_same_dim, (\n \"Self-attention requires query, key and \" \"value to be of the same size\"\n )\n if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗?\n self.k_proj = quant_noise(GroupLinearLayer(self.kdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n self.v_proj = quant_noise(GroupLinearLayer(self.vdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n self.q_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n self.out_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n if self.shared_memory_attention:\n self.bias_k_memory = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v_memory = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n self.bias_k_memory = self.bias_v_memory = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n self.tpu = False\n\n # 这里表示,如果共享memory_attention的话\n if self.shared_memory_attention:\n print('MEM SLOTS:' + str(mem_slots))\n print('Null attention:' + str(null_attention))\n print('USING SHARED MEMORY ATTENTION +++++++++')\n #self.num_heads = 1\n self.regressive 
= regressive\n if not regressive: \n self.relational_memory = RelationalMemory(\n mem_slots=mem_slots,\n head_size=self.head_dim , #128\n input_size=embed_dim,\n output_size=embed_dim,\n num_heads=self.num_heads, #1\n num_blocks=1,\n forget_bias=1,\n input_bias=0,\n gate_style=\"unit\",\n attention_mlp_layers=1,\n key_size=32,\n return_all_outputs=False,\n use_topk = self.use_topk,\n topk = self.topk,\n num_steps = num_steps, \n null_attention = null_attention\n )\n else:\n print('USING AUTO REGRESSIVE')\n self.relational_memory = RelationalMemoryRegressive(\n mem_slots=mem_slots,\n head_size=self.head_dim ,\n input_size=embed_dim,\n output_size=embed_dim,\n num_heads=self.num_heads,\n num_blocks=1,\n forget_bias=1,\n input_bias=0,\n gate_style=\"unit\",\n attention_mlp_layers=4,\n key_size=32,\n return_all_outputs=False,\n use_topk = self.use_topk,\n topk = self.topk,\n num_steps = num_steps,\n null_attention = False\n )\n self.memory_size = 128 #self.head_dim * self.num_heads\n '''\n self.mem_att = MHAMemory(\n n_head=4,\n d_model_read=embed_dim,\n d_model_write=self.memory_size,\n d_model_out=embed_dim,\n d_k=32,\n d_v=32,\n grad_sparse=False,\n )\n '''\n self.memory = None # 因为要共享self.memory,所以这里是为了占个位置\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def prepare_for_tpu_(self, **kwargs):\n self.tpu = True\n\n def reset_parameters(self):\n if self.qkv_same_dim:\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))\n if self.shared_memory_attention:\n nn.init.xavier_uniform_(self.k_proj_memory.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj_memory.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj_memory.weight, gain=1 / math.sqrt(2))\n\n else:\n nn.init.xavier_uniform_(self.k_proj.weight)\n nn.init.xavier_uniform_(self.v_proj.weight)\n nn.init.xavier_uniform_(self.q_proj.weight)\n\n #if self.shared_memory_attention:\n # nn.init.xavier_uniform_(self.k_proj_memory.weight)\n # nn.init.xavier_uniform_(self.v_proj_memory.weight)\n # nn.init.xavier_uniform_(self.q_proj_memory.weight)\n\n nn.init.xavier_uniform_(self.out_proj.weight)\n #if self.shared_memory_attention:\n # nn.init.xavier_uniform_(self.out_proj_memory.weight)\n \n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.)\n\n #if self.shared_memory_attention and self.out_proj_memory.bias is not None:\n # nn.init.constant_(self.out_proj.bias, 0.)\n \n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n #if self.shared_memory_attention:\n # if self.bias_k is not None:\n # nn.init.xavier_normal_(self.bias_k_memory)\n # if self.bias_v is not None:\n # nn.init.xavier_normal_(self.bias_v_memory)\n\n\n def forward(\n self,\n query,\n key: Optional[Tensor],\n value: Optional[Tensor],\n key_padding_mask: Optional[Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n need_weights: bool = True,\n static_kv: bool = False,\n attn_mask: Optional[Tensor] = None,\n before_softmax: bool = False,\n need_head_weights: bool = False,\n comp = None,\n memory = None\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): 
mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n \"\"\"\n if need_head_weights:\n need_weights = True\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n\n if (\n not self.onnx_trace\n and not self.tpu # don't use PyTorch version on TPUs\n and incremental_state is None\n and not static_kv\n # A workaround for quantization to work. Otherwise JIT compilation\n # treats bias in linear module as method.\n and not torch.jit.is_scripting()\n and False\n ):\n assert key is not None and value is not None\n if self.shared_memory_attention:\n memory,_ = F.multi_head_attention_forward(\n memory,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj_memory.bias, self.k_proj.bias, self.v_proj.bias)),\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout_module.p,\n self.out_proj_memory.weight,\n self.out_proj_memory.bias,\n self.training or self.dropout_module.apply_during_inference,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj_memory.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n )\n out,weights = F.multi_head_attention_forward(\n query,\n memory,\n memory,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj_memory.bias, self.v_proj_memory.bias)),\n self.bias_k_memory,\n self.bias_v_memory,\n self.add_zero_attn,\n self.dropout_module.p,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training or self.dropout_module.apply_during_inference,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj_memory.weight,\n v_proj_weight=self.v_proj_memory.weight,\n )\n else:\n out, weights = F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout_module.p,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training or self.dropout_module.apply_during_inference,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n\n ) \n\n return out, memory, weights\n\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if saved_state is not None and \"prev_key\" in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert self.encoder_decoder_attention and not self.self_attention\n key = value = None\n else:\n saved_state = None\n\n # 如果不共享memory attention\n if not self.shared_memory_attention:\n\n 
t1 = time.time()\n\n if self.self_attention:\n q = self.q_proj(query)\n k = self.k_proj(query)\n v = self.v_proj(query)\n elif self.encoder_decoder_attention:\n # encoder-decoder attention\n q = self.q_proj(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k = self.k_proj(key)\n v = self.v_proj(key)\n\n else:\n assert key is not None and value is not None\n \n q = self.q_proj(query)\n k = self.k_proj(key)\n v = self.v_proj(value)\n\n if comp is not None:\n v = v * comp\n #v_memory = v_memory * comp\n q *= self.scaling\n #q_memory *= self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n key_padding_mask.new_zeros(key_padding_mask.size(0), 1),\n ],\n dim=1,\n )\n\n q = (\n q.contiguous()\n .view(tgt_len, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n if k is not None:\n k = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n if v is not None:\n v = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n \n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if \"prev_key\" in saved_state:\n _prev_key = saved_state[\"prev_key\"]\n assert _prev_key is not None\n prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n assert k is not None\n k = torch.cat([prev_key, k], dim=1)\n if \"prev_value\" in saved_state:\n _prev_value = saved_state[\"prev_value\"]\n assert _prev_value is not None\n prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n assert v is not None\n v = torch.cat([prev_value, v], dim=1)\n prev_key_padding_mask: Optional[Tensor] = None\n if \"prev_key_padding_mask\" in saved_state:\n prev_key_padding_mask = saved_state[\"prev_key_padding_mask\"]\n assert k is not None and v is not None\n key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(\n key_padding_mask=key_padding_mask,\n prev_key_padding_mask=prev_key_padding_mask,\n batch_size=bsz,\n src_len=k.size(1),\n static_kv=static_kv,\n )\n\n saved_state[\"prev_key\"] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_value\"] = v.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_key_padding_mask\"] = key_padding_mask\n # In this branch incremental_state is never None\n assert incremental_state is not None\n incremental_state = self._set_input_buffer(incremental_state, saved_state)\n assert k is not None\n src_len = k.size(1)\n\n # This is part of a workaround to get around fork/join parallelism\n # not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.dim() == 0:\n key_padding_mask = None\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n assert v is not None\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if 
key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n torch.zeros(key_padding_mask.size(0), 1).type_as(\n key_padding_mask\n ),\n ],\n dim=1,\n )\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)\n\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n if not self.tpu:\n attn_weights = attn_weights.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),\n float(\"-inf\")\n )\n else:\n attn_weights = attn_weights.transpose(0, 2)\n attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf'))\n attn_weights = attn_weights.transpose(0, 2)\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if before_softmax:\n return attn_weights, v\n \n # 是这个\n attn_weights_float = utils.softmax(\n attn_weights, dim=-1, onnx_trace=self.onnx_trace\n )\n attn_weights = attn_weights_float.type_as(attn_weights)\n attn_probs = self.dropout_module(attn_weights)\n\n assert v is not None\n if self.use_topk:\n k = torch.topk(attn_probs, dim = 2, k = self.topk)\n mask = torch.zeros(attn_probs.size()).to(attn_probs.device)\n mask.scatter_(2, k.indices, 1)\n attn_probs = attn_probs * mask\n attn = torch.bmm(attn_probs, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n if self.onnx_trace and attn.size(1) == 1:\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n attn_weights: Optional[Tensor] = None\n if need_weights:\n attn_weights = attn_weights_float.view(\n bsz, self.num_heads, tgt_len, src_len\n ).transpose(1, 0)\n if not need_head_weights:\n # average attention weights over heads\n attn_weights = attn_weights.mean(dim=0)\n #print('time taken by default mha:' + str(time.time() - t1))\n return attn, None, attn_weights\n \n else: # 共享注意力机制 memory\n t1 = time.time()\n\n # 这个是共享memory的时候\n if self.memory is None:\n self.memory = self.relational_memory.initial_state(query.size(1), query.size(0)).to(query.device)\n\n self.memory = self.memory.to(query.device)\n\n #print(self.memory.size())\n \n \n key = key.transpose(1, 0)\n\n #print(key.size())\n #memory = self.memory[:key.size(0)]\n #print(self.memory.size())\n\n t2 = time.time()\n\n #print(self.memory)\n\n # self.memory只是一个memory更新的方式,它并不是workspace吧!!! 
lm-workspace这篇代码是不是搞错了\n # 那这个 self.memory \n # 这里是对memory进行更新\n # 利用relational_memory 来对 workspace中的memory进行更新\n _,_, self.memory, out_hx_mem_new = self.relational_memory(\n inputs=key,\n memory=self.memory#.cuda(),\n )\n #print('time taken by relational:' + str(time.time() - t2))\n\n\n\n #query = query.transpose(1, 0)\n #if self.regressive:\n # B, T, D = query.size()\n # query = query.reshape(B * T, -1).unsqueeze(1)\n #out_hx_mem_new, _, _ = self.mem_att(\n # query,#.reshape((bsz, self.num_blocks_out, self.block_size_out)),\n # self.memory,\n # self.memory,\n # )\n\n #z = torch.zeros(self.memory.size(0) - memory.size(0), memory.size(1), memory.size(2)).to(memory.device)\n #memory = torch.cat((memory, z), dim = 0)\n #self.memory = self.memory + memory\n #print('time taken by shared mha:' + str(time.time() - t1))\n #if self.regressive:\n # out_hx_mem_new = out_hx_mem_new.squeeze(1)\n # out_hx_mem_new = out_hx_mem_new.reshape(B, T, -1)\n\n # 这里的memory实际上没啥用处了,emmm 我觉得\n return out_hx_mem_new.transpose(0, 1), memory, None\n \"\"\"\n\n tgt_len = memory.size(0)\n src_len = key.size(0)\n q_memory = self.q_proj_memory(memory)\n k = self.k_proj(key)\n v = self.v_proj(value)\n\n q_memory = (\n q_memory.contiguous()\n .view(memory.size(0), bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n k = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n v = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n \n\n attn_weights_1 = torch.bmm(q_memory, k.transpose(1, 2))\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights_1 = attn_weights_1.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights_1 = attn_weights_1.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),\n float(\"-inf\")\n )\n\n attn_weights_float_1 = utils.softmax(\n attn_weights_1, dim=-1, onnx_trace=self.onnx_trace\n )\n attn_weights_1 = attn_weights_float_1.type_as(attn_weights_1)\n attn_probs_1 = self.dropout_module(attn_weights_1)\n\n assert v is not None\n memory = torch.bmm(attn_probs_1, v)\n\n memory = memory.permute(1, 0, 2)\n memory = memory.reshape(memory.size(0), bsz, self.num_heads, -1)\n memory = memory.reshape(memory.size(0), bsz, -1)\n\n\n\n q = self.q_proj(query)\n \n k_memory = self.k_proj_memory(memory)\n v_memory = self.v_proj_memory(memory)\n\n q = (\n q.contiguous()\n .view(src_len, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n k_memory = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n v_memory = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n attn_weights_2 = torch.bmm(q, k_memory.transpose(1, 2))\n \n attn_weights_float_2 = utils.softmax(\n attn_weights_2, dim=-1, onnx_trace=self.onnx_trace\n )\n \n attn_weights_2 = attn_weights_float_2.type_as(attn_weights_2)\n attn_probs_2 = self.dropout_module(attn_weights_2)\n\n out = torch.bmm(attn_probs_2, v)\n out = out.transpose(0, 1).contiguous().view(src_len, bsz, embed_dim)\n return out, memory, None\n \"\"\"\n \n # 共享参数的时候,或者是共享memory attn的时候,\n def init_memory(self, bs, ts = None, device = None):\n if not self.regressive:\n self.memory = self.relational_memory.initial_state(bs).to(device)\n else:\n self.memory = self.relational_memory.initial_state(bs, ts).to(device)\n\n\n @staticmethod\n def _append_prev_key_padding_mask(\n key_padding_mask: Optional[Tensor],\n prev_key_padding_mask: Optional[Tensor],\n 
batch_size: int,\n src_len: int,\n static_kv: bool,\n ) -> Optional[Tensor]:\n # saved key padding masks have shape (bsz, seq_len)\n if prev_key_padding_mask is not None and static_kv:\n new_key_padding_mask = prev_key_padding_mask\n elif prev_key_padding_mask is not None and key_padding_mask is not None:\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1\n )\n # During incremental decoding, as the padding token enters and\n # leaves the frame, there will be a time when prev or current\n # is None\n elif prev_key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - prev_key_padding_mask.size(1)),\n device=prev_key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), filler.float()], dim=1\n )\n elif key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - key_padding_mask.size(1)),\n device=key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [filler.float(), key_padding_mask.float()], dim=1\n )\n else:\n new_key_padding_mask = prev_key_padding_mask\n return new_key_padding_mask\n\n @torch.jit.export\n def reorder_incremental_state(\n self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor\n ):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer_k = input_buffer[k]\n if input_buffer_k is not None:\n if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0):\n break\n input_buffer[k] = input_buffer_k.index_select(0, new_order)\n incremental_state = self._set_input_buffer(incremental_state, input_buffer)\n return incremental_state\n\n def _get_input_buffer(\n self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]\n ) -> Dict[str, Optional[Tensor]]:\n result = self.get_incremental_state(incremental_state, \"attn_state\")\n if result is not None:\n return result\n else:\n empty_result: Dict[str, Optional[Tensor]] = {}\n return empty_result\n\n def _set_input_buffer(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n buffer: Dict[str, Optional[Tensor]],\n ):\n return self.set_incremental_state(incremental_state, \"attn_state\", buffer)\n\n def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):\n return attn_weights\n\n def upgrade_state_dict_named(self, state_dict, name):\n prefix = name + \".\" if name != \"\" else \"\"\n items_to_add = {}\n keys_to_remove = []\n for k in state_dict.keys():\n if k.endswith(prefix + \"in_proj_weight\"):\n # in_proj_weight used to be q + k + v with same dimensions\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.weight\"] = state_dict[k][:dim]\n items_to_add[prefix + \"k_proj.weight\"] = state_dict[k][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.weight\"] = state_dict[k][2 * dim :]\n\n keys_to_remove.append(k)\n\n k_bias = prefix + \"in_proj_bias\"\n if k_bias in state_dict.keys():\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.bias\"] = state_dict[k_bias][:dim]\n items_to_add[prefix + \"k_proj.bias\"] = state_dict[k_bias][\n dim : 2 * dim\n ]\n items_to_add[prefix + \"v_proj.bias\"] = state_dict[k_bias][2 * dim :]\n\n keys_to_remove.append(prefix + \"in_proj_bias\")\n\n for k in keys_to_remove:\n del state_dict[k]\n\n for key, value in items_to_add.items():\n state_dict[key] = value" }, { 
"identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n self.attn_log = None\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. 
Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n self.input_projector = nn.Linear(self.input_size, self.mem_size)\n\n self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n self.input_gate_projector = nn.Linear(self.mem_size * num_steps, self.num_gates)\n self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 2))\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. 
This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n return init_state\n\n def multihead_attention(self, input, memory):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n scores = torch.softmax(scores, dim = -1)\n self.attn_log = scores[0]\n if not self.null_attention:\n if self.use_topk:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n mask = torch.zeros(scores.size()).to(scores.device)\n mask.scatter_(3, topk.indices, 1)\n scores = scores * mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 
1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * self.mem_size\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def print_log(self):\n print(self.attn_log)\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. create_gate function is meant to be called for each step, with input seq length of 1\")\n inputs = inputs.view(inputs.shape[0], -1)\n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n input_gate = torch.sigmoid(input_gate + self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.multihead_attention(memory, memory, use_topk_ = False, store_log = False)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: 
Optional, whether to treat `input` as a sequence\n of matrices. Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n else:\n # keep (Batch, ...) dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory)\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(inputs_reshape, memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n\n\n output = next_memory.view(next_memory.shape[0], -1)\n return output, next_memory\n\n def forward(self, inputs, memory, parallel = True):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n if not parallel:\n for idx_step in range(inputs.shape[1]):\n logit, memory = self.forward_step(inputs[:, idx_step], memory)\n logits.append(logit)\n logits = torch.cat(logits)\n else:\n logits, memory = self.forward_step(inputs, memory, treat_input_as_matrix = True)\n \n memory_out = self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory\n else:\n return logits, memory_out, memory" }, { "identifier": "GroupLinearLayer", "path": "multi_part_assembly/utils/wx_transformer_utilities/group_linear_layer.py", "snippet": "class GroupLinearLayer(nn.Module):\n\n def __init__(self, din, dout, num_blocks, bias=True, a = None):\n super(GroupLinearLayer, self).__init__()\n self.nb = num_blocks\n self.dout = dout\n\n if a is None:\n a = 1. 
/ math.sqrt(dout * num_blocks)\n\n #gain = 1.0 / math.sqrt(2)\n #a = gain * math.sqrt(6.0 / (din + dout))\n\n self.weight = nn.Parameter(torch.FloatTensor(num_blocks,din,dout).uniform_(-a,a))\n\n self.bias = bias\n\n if bias is True:\n self.bias = nn.Parameter(torch.FloatTensor(num_blocks,dout).uniform_(-a,a))\n #self.bias = nn.Parameter(torch.zeros(dout*num_blocks))\n else:\n self.bias = None\n\n def forward(self,x):\n\n\t#input: ts x bs x blocks*nhid\n\t#ts*bs , blocks, nhid\n\t#blocks, ts*bs, nhid\n ts,bs,m = x.shape\t\n\n x = x.reshape((ts*bs, self.nb, m//self.nb))\n x = x.permute(1,0,2)\n x = torch.bmm(x,self.weight)\n x = x.permute(1,0,2)\n \n if not self.bias is None:\n x = x + self.bias\n\n x = x.reshape((ts, bs, self.dout*self.nb))\n \n #if not self.bias is None:\n # x += self.bias\n\n return x" }, { "identifier": "MemoryAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/basic_mha.py", "snippet": "class MemoryAttention(nn.Module):\n def __init__(self, n_blocks_query, n_blocks_val, dim_query, dim_val, n_heads=8):\n super(MemoryAttention, self).__init__()\n\n self.n_heads = n_heads\n self.n_blocks_val = n_blocks_val\n self.dim_val = dim_val\n self.block_dim_val = dim_val // self.n_blocks_val\n\n self.n_blocks_query = n_blocks_query\n self.dim_query = dim_query\n self.block_dim_query = dim_query // self.n_blocks_query\n\n self.head_dim = 64\n self.scale = self.head_dim ** -0.5\n\n #self.n_blocks_val * self.block_dim_val\n\n self.query_net = GroupLinearLayer(self.block_dim_query, self.head_dim * self.n_heads, n_blocks_query)\n self.key_net = GroupLinearLayer(self.block_dim_val, self.head_dim * self.n_heads, n_blocks_val)\n self.value_net = GroupLinearLayer(self.block_dim_val, self.head_dim * self.n_heads, n_blocks_val)\n self.final = GroupLinearLayer(self.head_dim * self.n_heads, self.block_dim_query, n_blocks_query)\n\n def forward(self, q, kv):\n\n #comes in as: bs, pos*emb.\n #positions_attend x T*bs x emb\n\n\n #q = q.permute(1,0,2)\n #kv = kv.permute(1,0,2)\n\n #print('kv shape after permute', kv.shape)\n\n seq_len_q,bsz,_ = q.shape\n seq_len_v,bsz,_ = kv.shape\n\n q = q.reshape((seq_len_q, bsz, self.n_blocks_query * self.block_dim_query))\n\n kv = kv.reshape((seq_len_v, bsz, self.n_blocks_val * self.block_dim_val))\n\n q = self.query_net(q).view(seq_len_q, bsz, self.n_blocks_query, self.n_heads, self.head_dim)\n k = self.key_net(kv).view(seq_len_v, bsz, self.n_blocks_val, self.n_heads, self.head_dim)\n v = self.value_net(kv).view(seq_len_v, bsz, self.n_blocks_val, self.n_heads, self.head_dim)\n\n q = q.transpose(2,3) * self.scale\n k = k.transpose(2,3)\n v = v.transpose(2,3)\n score = torch.matmul(q, k.transpose(3,4))\n #print('score shape', score.shape)\n score = F.softmax(score, dim=-1)\n out = torch.matmul(score, v).transpose(2,3)\n #print('out shape', out.shape)\n score = score.mean(dim=2)\n\n out = out.reshape(seq_len_q, bsz, self.n_blocks_query * self.head_dim * self.n_heads)\n out = self.final(out)\n out = out.view(seq_len_q, bsz, self.dim_query)\n\n\n return out, score" }, { "identifier": "quant_noise", "path": "multi_part_assembly/utils/wx_transformer_utilities/quant_noise.py", "snippet": "def quant_noise(module, p, block_size):\n \"\"\"\n Wraps modules and applies quantization noise to the weights for\n subsequent quantization with Iterative Product Quantization as\n described in \"Training with Quantization Noise for Extreme Model Compression\"\n\n Args:\n - module: nn.Module\n - p: amount of Quantization Noise\n - block_size: size of the blocks 
for subsequent quantization with iPQ\n\n Remarks:\n - Module weights must have the right sizes wrt the block size\n - Only Linear, Embedding and Conv2d modules are supported for the moment\n - For more detail on how to quantize by blocks with convolutional weights,\n see \"And the Bit Goes Down: Revisiting the Quantization of Neural Networks\"\n - We implement the simplest form of noise here as stated in the paper\n which consists in randomly dropping blocks\n \"\"\"\n\n # if no quantization noise, don't register hook\n if p <= 0:\n return module\n\n # supported modules\n assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))\n\n # test whether module.weight has the right sizes wrt block_size\n is_conv = module.weight.ndim == 4\n\n # 2D matrix\n if not is_conv:\n assert module.weight.size(1) % block_size == 0, \"Input features must be a multiple of block sizes\"\n\n # 4D matrix\n else:\n # 1x1 convolutions\n if module.kernel_size == (1, 1):\n assert module.in_channels % block_size == 0, \"Input channels must be a multiple of block sizes\"\n # regular convolutions\n else:\n k = module.kernel_size[0] * module.kernel_size[1]\n assert k % block_size == 0, \"Kernel size must be a multiple of block size\"\n\n def _forward_pre_hook(mod, input):\n # no noise for evaluation\n if mod.training:\n if not is_conv:\n # gather weight and sizes\n weight = mod.weight\n in_features = weight.size(1)\n out_features = weight.size(0)\n\n # split weight matrix into blocks and randomly drop selected blocks\n mask = torch.zeros(in_features // block_size * out_features, device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)\n\n else:\n # gather weight and sizes\n weight = mod.weight\n in_channels = mod.in_channels\n out_channels = mod.out_channels\n\n # split weight matrix into blocks and randomly drop selected blocks\n if mod.kernel_size == (1, 1):\n mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)\n else:\n mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])\n\n # scale weights and apply mask\n mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript\n s = 1 / (1 - p)\n mod.weight.data = s * weight.masked_fill(mask, 0)\n\n module.register_forward_pre_hook(_forward_pre_hook)\n return module" }, { "identifier": "FairseqDropout", "path": "multi_part_assembly/utils/wx_transformer_utilities/fairseq_dropout.py", "snippet": "class FairseqDropout(nn.Module):\n\n def __init__(self, p, module_name=None):\n super().__init__()\n self.p = p\n self.module_name = module_name\n self.apply_during_inference = False\n\n def forward(self, x, inplace: bool = False):\n if self.training or self.apply_during_inference:\n return F.dropout(x, p=self.p, training=True, inplace=inplace)\n else:\n return x\n\n def make_generation_fast_(\n self,\n name: str,\n retain_dropout: bool = False,\n retain_dropout_modules: Optional[List[str]] = None,\n **kwargs\n ):\n if retain_dropout:\n if retain_dropout_modules is not None and self.module_name is None:\n logger.warning(\n 'Cannot enable dropout during inference for module {} '\n 'because module_name was not set'.format(name)\n )\n elif (\n retain_dropout_modules is None # if None, apply to all modules\n or self.module_name in retain_dropout_modules\n 
):\n logger.info(\n 'Enabling dropout during inference for module: {}'.format(name)\n )\n self.apply_during_inference = True\n else:\n logger.info('Disabling dropout for module: {}'.format(name))" } ]
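The GroupLinearLayer snippet in the context above applies an independent linear map to each block of the last dimension via a batched matmul. A minimal shape-check sketch is below; it assumes the class is importable from the path given in the record (multi_part_assembly/utils/wx_transformer_utilities/group_linear_layer.py) and that inputs are time-major, as the comments in its forward() indicate. This is an illustration, not code from the record.

# Minimal usage sketch for the GroupLinearLayer snippet above.
# Assumption: the module path from the record is importable; the input layout is
# time-major (ts, bs, num_blocks * din), as noted in the snippet's forward().
import torch
from multi_part_assembly.utils.wx_transformer_utilities.group_linear_layer import GroupLinearLayer

ts, bs, num_blocks, din, dout = 7, 2, 4, 16, 32
layer = GroupLinearLayer(din, dout, num_blocks)

x = torch.randn(ts, bs, num_blocks * din)  # each block owns its own (din, dout) weight
y = layer(x)
assert y.shape == (ts, bs, num_blocks * dout)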
from typing import Dict, List, Optional from .layer_norm import LayerNorm from .multihead_attention import MultiheadAttention from .relational_memory import RelationalMemory from .group_linear_layer import GroupLinearLayer from .basic_mha import MemoryAttention from .quant_noise import quant_noise from .fairseq_dropout import FairseqDropout from torch import Tensor import torch import torch.nn as nn import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils import random import torch.nn.functional as F
15,265
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args)
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args)
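The docstring in the cropped_code/all_code above contrasts the post-norm ordering from the original paper (dropout -> add residual -> layernorm) with the pre-norm ordering that *args.encoder_normalize_before* enables. The following small sketch, not taken from the repository, illustrates the two orderings side by side:

# Illustrative sketch (not the repo's code) of the two residual orderings that
# the TransformerEncoderLayerVanilla docstring above describes.
import torch
import torch.nn as nn

def post_norm(x, sublayer, norm, dropout):
    # paper default: sublayer -> dropout -> add residual -> layernorm
    return norm(x + dropout(sublayer(x)))

def pre_norm(x, sublayer, norm, dropout):
    # tensor2tensor variant (encoder_normalize_before=True):
    # layernorm -> sublayer -> dropout -> add residual
    return x + dropout(sublayer(norm(x)))

dim = 8
x = torch.randn(4, dim)
ffn = nn.Linear(dim, dim)
norm = nn.LayerNorm(dim)
drop = nn.Dropout(0.1)

assert post_norm(x, ffn, norm, drop).shape == pre_norm(x, ffn, norm, drop).shape == x.shape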
self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5)
0
2023-12-15 13:13:01+00:00
24k
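Taken together, the fields of the record above (the context snippets, import_statement, cropped_code, next_line, gold_snippet_index) look like a next-line code-completion example. The sketch below shows one way such a record might be assembled into a prompt and scored; the field semantics and the exact-match metric are assumptions for illustration, not something the dump itself specifies.

# Hedged sketch: assembling one record of this dump into a completion prompt
# and scoring a model prediction against next_line by exact match.
# Field meanings and the exact-match criterion are assumptions, not given by the dump.
def build_prompt(record: dict) -> str:
    snippets = "\n\n".join(c["snippet"] for c in record["context"])
    return f"{snippets}\n\n{record['import_statement']}\n\n{record['cropped_code']}\n"

def exact_match(prediction: str, record: dict) -> bool:
    return prediction.strip() == record["next_line"].strip()

# Example with the fields shown above (heavily abbreviated):
record = {
    "context": [{"identifier": "RelationalMemory", "snippet": "class RelationalMemory(nn.Module): ..."}],
    "import_statement": "import torch\nimport torch.nn as nn",
    "cropped_code": "class TransformerEncoderLayerVanilla(nn.Module):\n    def __init__(self, args, out_proj=None):\n        ...",
    "next_line": "self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5)",
}
prompt = build_prompt(record)
assert exact_match("self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5)", record)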
camenduru/FreeInit-hf
app.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n use_inflated_groupnorm=False,\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n \n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n 
downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if 
use_inflated_groupnorm:\n self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n else:\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u 
= model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "AnimationFreeInitPipeline", "path": "animatediff/pipelines/pipeline_animation.py", "snippet": "class AnimationFreeInitPipeline(AnimationPipeline):\n _optional_components = []\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n EulerDiscreteScheduler,\n EulerAncestralDiscreteScheduler,\n DPMSolverMultistepScheduler,\n ],\n ):\n super().__init__(vae, text_encoder, tokenizer, unet, scheduler)\n self.freq_filter = None\n\n \n @torch.no_grad()\n def init_filter(self, video_length, height, width, filter_params):\n # initialize frequency filter for noise reinitialization\n batch_size = 1\n num_channels_latents = self.unet.in_channels\n filter_shape = [\n batch_size, \n num_channels_latents, \n video_length, \n height // self.vae_scale_factor, \n width // self.vae_scale_factor\n ]\n # self.freq_filter = get_freq_filter(filter_shape, device=self._execution_device, params=filter_params)\n self.freq_filter = get_freq_filter(\n filter_shape, \n device=self._execution_device, \n filter_type=filter_params.method,\n n=filter_params.n,\n d_s=filter_params.d_s,\n d_t=filter_params.d_t\n )\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n video_length: Optional[int],\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_videos_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"tensor\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n # freeinit args\n num_iters: int = 5,\n use_fast_sampling: bool = False,\n save_intermediate: bool = False,\n return_orig: bool = False,\n save_dir: str = None,\n save_name: str = None,\n use_fp16: bool = False,\n **kwargs\n ):\n if use_fp16:\n print('Warning: using half percision for inferencing!')\n self.vae.to(dtype=torch.float16)\n self.unet.to(dtype=torch.float16)\n self.text_encoder.to(dtype=torch.float16)\n # Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # Check inputs. Raise error if not correct\n # import pdb\n # pdb.set_trace()\n self.check_inputs(prompt, height, width, callback_steps)\n\n # Define call parameters\n # batch_size = 1 if isinstance(prompt, str) else len(prompt)\n batch_size = 1\n if latents is not None:\n batch_size = latents.shape[0]\n if isinstance(prompt, list):\n batch_size = len(prompt)\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # Encode input prompt\n prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size\n if negative_prompt is not None:\n negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size \n text_embeddings = self._encode_prompt(\n prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n\n # Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # Prepare latent variables\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_channels_latents,\n video_length,\n height,\n width,\n text_embeddings.dtype,\n device,\n generator,\n latents,\n )\n latents_dtype = latents.dtype\n\n # Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # Sampling with FreeInit.\n for iter in range(num_iters):\n # FreeInit ------------------------------------------------------------------\n if iter == 0:\n initial_noise = latents.detach().clone()\n else:\n # 1. DDPM Forward with initial noise, get noisy latents z_T\n # if use_fast_sampling:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps / num_iters * (iter + 1) - 1\n # else:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1\n current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 # diffuse to t=999 noise level\n diffuse_timesteps = torch.full((batch_size,),int(current_diffuse_timestep))\n diffuse_timesteps = diffuse_timesteps.long()\n z_T = self.scheduler.add_noise(\n original_samples=latents.to(device), \n noise=initial_noise.to(device), \n timesteps=diffuse_timesteps.to(device)\n )\n # 2. create random noise z_rand for high-frequency\n z_rand = torch.randn((batch_size * num_videos_per_prompt, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor), device=device)\n # 3. 
Roise Reinitialization\n latents = freq_mix_3d(z_T.to(dtype=torch.float32), z_rand, LPF=self.freq_filter)\n latents = latents.to(latents_dtype)\n \n # Coarse-to-Fine Sampling for Fast Inference (can lead to sub-optimal results)\n if use_fast_sampling:\n current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n self.scheduler.set_timesteps(current_num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n # --------------------------------------------------------------------------\n\n # Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n # if use_fast_sampling:\n # # Coarse-to-Fine Sampling for Fast Inference\n # current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n # current_timesteps = timesteps[:current_num_inference_steps]\n # else:\n current_timesteps = timesteps\n for i, t in enumerate(current_timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype)\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(current_timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n \n # save intermediate results\n if save_intermediate:\n # Post-processing\n video = self.decode_latents(latents)\n video = torch.from_numpy(video)\n os.makedirs(save_dir, exist_ok=True)\n save_videos_grid(video, f\"{save_dir}/{save_name}_iter{iter}.gif\")\n \n if return_orig and iter==0:\n orig_video = self.decode_latents(latents)\n orig_video = torch.from_numpy(orig_video)\n\n # Post-processing\n video = self.decode_latents(latents)\n\n # Convert to tensor\n if output_type == \"tensor\":\n video = torch.from_numpy(video)\n\n if not return_dict:\n return video\n\n if return_orig:\n return AnimationFreeInitPipelineOutput(videos=video, orig_videos=orig_video)\n\n return AnimationFreeInitPipelineOutput(videos=video)" }, { "identifier": "save_videos_grid", "path": "animatediff/utils/util.py", "snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = (x * 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and 
returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output 
blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path = {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = 
unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n text_model = CLIPTextModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n text_model.load_state_dict(text_model_dict)\n\n return text_model" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n 
new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in 
range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n return new_checkpoint" }, { "identifier": "get_freq_filter", "path": "animatediff/utils/freeinit_utils.py", "snippet": "def get_freq_filter(shape, device, filter_type, n, d_s, d_t):\n \"\"\"\n Form the frequency filter for noise reinitialization.\n\n Args:\n shape: shape of latent (B, C, T, H, W)\n filter_type: type of the freq filter\n n: (only for butterworth) order of the filter, larger n ~ ideal, smaller n ~ gaussian\n d_s: normalized stop frequency for spatial dimensions (0.0-1.0)\n d_t: normalized stop frequency for temporal dimension (0.0-1.0)\n \"\"\"\n if filter_type == \"gaussian\":\n return gaussian_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"ideal\":\n return ideal_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"box\":\n return box_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"butterworth\":\n return butterworth_low_pass_filter(shape=shape, n=n, d_s=d_s, d_t=d_t).to(device)\n else:\n raise NotImplementedError" } ]
import os
import torch
import random
import gradio as gr
from glob import glob
from omegaconf import OmegaConf
from safetensors import safe_open
from diffusers import AutoencoderKL
from diffusers import EulerDiscreteScheduler, DDIMScheduler
from diffusers.utils.import_utils import is_xformers_available
from transformers import CLIPTextModel, CLIPTokenizer
from animatediff.models.unet import UNet3DConditionModel
from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline
from animatediff.utils.util import save_videos_grid
from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
from diffusers.training_utils import set_seed
from animatediff.utils.freeinit_utils import get_freq_filter
from collections import namedtuple
15176
base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor] self.freq_filter = get_freq_filter( shape, device="cuda", filter_type=self.selected_filter_type, n=4, d_s=self.set_d_s, d_t=self.set_d_t ) def animate( self, base_model_dropdown, motion_module_dropdown, prompt_textbox, negative_prompt_textbox, width_slider, height_slider, seed_textbox, # freeinit params filter_type_dropdown, d_s_slider, d_t_slider, num_iters_slider, # speed up speed_up_options ): # set global seed set_seed(42) d_s = float(d_s_slider) d_t = float(d_t_slider) num_iters = int(num_iters_slider) if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown) if self.selected_motion_module != motion_module_dropdown: self.update_motion_module(motion_module_dropdown) self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s self.set_d_t = d_t if self.set_width != width_slider or self.set_height != height_slider or self.selected_filter_type != filter_type_dropdown or self.set_d_s != d_s or self.set_d_t != d_t: self.update_filter(width_slider, height_slider, filter_type_dropdown, d_s, d_t) if is_xformers_available(): self.unet.enable_xformers_memory_efficient_attention() pipeline = AnimationFreeInitPipeline( vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=DDIMScheduler(**OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs)) ).to("cuda") # (freeinit) initialize frequency filter for noise reinitialization ------------- pipeline.freq_filter = self.freq_filter # ------------------------------------------------------------------------------- if int(seed_textbox) > 0: seed = int(seed_textbox) else: seed = random.randint(1, 1e16) torch.manual_seed(int(seed)) assert seed == torch.initial_seed() print(f"### seed: {seed}") generator = torch.Generator(device="cuda") generator.manual_seed(seed) sample_output = pipeline( prompt_textbox, negative_prompt = negative_prompt_textbox, num_inference_steps = 25, 
guidance_scale = 7.5, width = width_slider, height = height_slider, video_length = 16, num_iters = num_iters, use_fast_sampling = True if "use_coarse_to_fine_sampling" in speed_up_options else False, save_intermediate = False, return_orig = True, use_fp16 = True if "use_fp16" in speed_up_options else False ) orig_sample = sample_output.orig_videos sample = sample_output.videos save_sample_path = os.path.join(self.savedir, f"sample.mp4")
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5" inference_config_path = "configs/inference/inference-v1.yaml" css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ examples = [ # 0-RealisticVision [ "realisticVisionV51_v20Novae.safetensors", "mm_sd_v14.ckpt", "A panda standing on a surfboard in the ocean under moonlight.", "worst quality, low quality, nsfw, logo", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None 
self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor] self.freq_filter = get_freq_filter( shape, device="cuda", filter_type=self.selected_filter_type, n=4, d_s=self.set_d_s, d_t=self.set_d_t ) def animate( self, base_model_dropdown, motion_module_dropdown, prompt_textbox, negative_prompt_textbox, width_slider, height_slider, seed_textbox, # freeinit params filter_type_dropdown, d_s_slider, d_t_slider, num_iters_slider, # speed up speed_up_options ): # set global seed set_seed(42) d_s = float(d_s_slider) d_t = float(d_t_slider) num_iters = int(num_iters_slider) if self.selected_base_model != base_model_dropdown: self.update_base_model(base_model_dropdown) if 
self.selected_motion_module != motion_module_dropdown: self.update_motion_module(motion_module_dropdown) self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s self.set_d_t = d_t if self.set_width != width_slider or self.set_height != height_slider or self.selected_filter_type != filter_type_dropdown or self.set_d_s != d_s or self.set_d_t != d_t: self.update_filter(width_slider, height_slider, filter_type_dropdown, d_s, d_t) if is_xformers_available(): self.unet.enable_xformers_memory_efficient_attention() pipeline = AnimationFreeInitPipeline( vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=DDIMScheduler(**OmegaConf.to_container(self.inference_config.noise_scheduler_kwargs)) ).to("cuda") # (freeinit) initialize frequency filter for noise reinitialization ------------- pipeline.freq_filter = self.freq_filter # ------------------------------------------------------------------------------- if int(seed_textbox) > 0: seed = int(seed_textbox) else: seed = random.randint(1, 1e16) torch.manual_seed(int(seed)) assert seed == torch.initial_seed() print(f"### seed: {seed}") generator = torch.Generator(device="cuda") generator.manual_seed(seed) sample_output = pipeline( prompt_textbox, negative_prompt = negative_prompt_textbox, num_inference_steps = 25, guidance_scale = 7.5, width = width_slider, height = height_slider, video_length = 16, num_iters = num_iters, use_fast_sampling = True if "use_coarse_to_fine_sampling" in speed_up_options else False, save_intermediate = False, return_orig = True, use_fp16 = True if "use_fp16" in speed_up_options else False ) orig_sample = sample_output.orig_videos sample = sample_output.videos save_sample_path = os.path.join(self.savedir, f"sample.mp4")
save_videos_grid(sample, save_sample_path)
2
2023-12-19 21:06:32+00:00
24k
m-abr/FCPCodebase
world/World.py
[ { "identifier": "Logger", "path": "logs/Logger.py", "snippet": "class Logger():\n _folder = None\n\n def __init__(self, is_enabled:bool, topic:str) -> None:\n self.no_of_entries = 0 \n self.enabled = is_enabled\n self.topic = topic\n\n def write(self, msg:str, timestamp:bool=True, step:int=None) -> None:\n '''\n Write `msg` to file named `self.topic`\n\n Parameters\n ----------\n msg : str\n message to be written\n step : int\n simulation step is written before the message to provide additional information\n default is `None` (nothing is written before the message)\n '''\n if not self.enabled: return\n\n # The log folder is only created if needed\n if Logger._folder is None: \n rnd = ''.join(random.choices(ascii_uppercase, k=6)) # Useful if multiple processes are running in parallel \n Logger._folder = \"./logs/\" + datetime.now().strftime(\"%Y-%m-%d_%H.%M.%S__\") + rnd + \"/\"\n print(\"\\nLogger Info: see\",Logger._folder)\n Path(Logger._folder).mkdir(parents=True, exist_ok=True)\n\n self.no_of_entries += 1\n\n with open(Logger._folder + self.topic + \".log\", 'a+') as f:\n prefix = \"\"\n write_step = step is not None\n if timestamp or write_step:\n prefix = \"{\"\n if timestamp: \n prefix += datetime.now().strftime(\"%a %H:%M:%S\")\n if write_step: prefix += \" \"\n if write_step:\n prefix += f'Step:{step}'\n prefix += \"} \"\n f.write(prefix + msg + \"\\n\")" }, { "identifier": "Matrix_4x4", "path": "math_ops/Matrix_4x4.py", "snippet": "class Matrix_4x4():\n\n def __init__(self, matrix = None) -> None:\n '''\n Constructor examples:\n a = Matrix_4x4( ) # create identity matrix\n b = Matrix_4x4( [[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4]] ) # manually initialize matrix\n c = Matrix_4x4( [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4] ) # manually initialize matrix\n d = Matrix_4x4( b ) # copy constructor\n '''\n if matrix is None:\n self.m = np.identity(4)\n elif type(matrix) == Matrix_4x4: \n self.m = np.copy(matrix.m)\n elif type(matrix) == Matrix_3x3: \n self.m = np.identity(4)\n self.m[0:3,0:3] = matrix.m\n else:\n self.m = np.asarray(matrix)\n self.m.shape = (4,4) #reshape if needed, throw error if impossible\n\n\n @classmethod\n def from_translation(cls, translation_vec):\n '''\n Create transformation matrix from translation_vec translation\n e.g. Matrix_4x4.from_translation((a,b,c))\n output: [[1,0,0,a],[0,1,0,b],[0,0,1,c],[0,0,0,1]]\n '''\n mat = np.identity(4)\n mat[0:3,3] = translation_vec\n return cls(mat)\n\n @classmethod\n def from_3x3_and_translation(cls, mat3x3:Matrix_3x3, translation_vec):\n '''\n Create transformation matrix from rotation matrix (3x3) and translation\n e.g. 
Matrix_4x4.from_3x3_and_translation(r,(a,b,c)) \n output: [[r00,r01,r02,a],[r10,r11,r12,b],[r20,r21,r22,c],[0,0,0,1]]\n '''\n mat = np.identity(4)\n mat[0:3,0:3] = mat3x3.m\n mat[0:3,3] = translation_vec\n return cls(mat)\n\n def translate(self, translation_vec, in_place=False):\n '''\n Translates the current transformation matrix\n\n Parameters\n ----------\n translation_vec : array_like, length 3\n translation vector\n in_place: bool, optional\n * True: the internal matrix is changed in-place\n * False: a new matrix is returned and the current one is not changed \n\n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n vec = np.array([*translation_vec,1])# conversion to 4D vector\n np.matmul(self.m, vec, out=vec) # compute only 4th column\n\n if in_place:\n self.m[:,3] = vec\n return self\n else:\n ret = Matrix_4x4(self.m)\n ret.m[:,3] = vec\n return ret\n\n\n def get_translation(self):\n ''' Get translation vector (x,y,z) '''\n return self.m[0:3,3] # return view\n\n def get_x(self):\n return self.m[0,3]\n\n def get_y(self):\n return self.m[1,3]\n\n def get_z(self):\n return self.m[2,3]\n\n def get_rotation_4x4(self):\n ''' Get Matrix_4x4 without translation ''' \n mat = Matrix_4x4(self)\n mat.m[0:3,3] = 0\n return mat\n\n def get_rotation(self):\n ''' Get rotation Matrix_3x3 '''\n return Matrix_3x3(self.m[0:3,0:3])\n\n def get_distance(self):\n ''' Get translation vector length '''\n return np.linalg.norm(self.m[0:3,3])\n\n def get_roll_deg(self):\n ''' Get angle around the x-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[2,1] == 0 and self.m[2,2] == 0: \n return 180\n return atan2(self.m[2,1], self.m[2,2]) * 180 / pi\n\n def get_pitch_deg(self):\n ''' Get angle around the y-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n return atan2(-self.m[2,0], sqrt(self.m[2,1]*self.m[2,1] + self.m[2,2]*self.m[2,2])) * 180 / pi\n\n def get_yaw_deg(self):\n ''' Get angle around the z-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[1,0] == 0 and self.m[0,0] == 0: \n return atan2(self.m[0,1], self.m[1,1]) * 180 / pi\n return atan2(self.m[1,0], self.m[0,0]) * 180 / pi\n \n def get_inclination_deg(self):\n ''' Get inclination of z-axis in relation to reference z-axis '''\n return 90 - (asin(np.clip(self.m[2,2],-1,1)) * 180 / pi)\n\n def rotate_deg(self, rotation_vec, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_rad(rotation_vec, rotation_deg * (pi/180) , in_place)\n\n \n def rotate_rad(self, rotation_vec, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n\n # shortcuts for rotation around 1 axis\n if 
rotation_vec[0]==0:\n if rotation_vec[1]==0:\n if rotation_vec[2]==1:\n return self.rotate_z_rad(rotation_rad, in_place)\n elif rotation_vec[2]==-1:\n return self.rotate_z_rad(-rotation_rad, in_place)\n elif rotation_vec[2]==0:\n if rotation_vec[1]==1:\n return self.rotate_y_rad(rotation_rad, in_place)\n elif rotation_vec[1]==-1:\n return self.rotate_y_rad(-rotation_rad, in_place)\n elif rotation_vec[1]==0 and rotation_vec[2]==0:\n if rotation_vec[0]==1:\n return self.rotate_x_rad(rotation_rad, in_place)\n elif rotation_vec[0]==-1:\n return self.rotate_x_rad(-rotation_rad, in_place)\n \n c = np.math.cos(rotation_rad)\n c1 = 1 - c\n s = np.math.sin(rotation_rad)\n x = rotation_vec[0]\n y = rotation_vec[1]\n z = rotation_vec[2]\n xxc1 = x * x * c1\n yyc1 = y * y * c1\n zzc1 = z * z * c1\n xyc1 = x * y * c1\n xzc1 = x * z * c1\n yzc1 = y * z * c1\n xs = x * s\n ys = y * s\n zs = z * s\n\n mat = np.array([\n [xxc1 + c, xyc1 - zs, xzc1 + ys, 0],\n [xyc1 + zs, yyc1 + c, yzc1 - xs, 0],\n [xzc1 - ys, yzc1 + xs, zzc1 + c, 0],\n [0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n\n def rotate_x_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [1, 0, 0, 0],\n [0, c,-s, 0],\n [0, s, c, 0],\n [0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_y_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c, 0, s, 0],\n [ 0, 1, 0, 0],\n [-s, 0, c, 0],\n [ 0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_z_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c,-s, 0, 0],\n [ s, c, 0, 0],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_x_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix 
is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_x_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_y_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_y_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_z_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_z_rad(rotation_deg * (pi/180), in_place)\n\n def invert(self, in_place=False):\n '''\n Inverts the current transformation matrix\n\n Parameters\n ----------\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n\n if in_place:\n self.m = np.linalg.inv(self.m)\n return self\n else:\n return Matrix_4x4(np.linalg.inv(self.m))\n\n def multiply(self,mat, in_place=False):\n '''\n Multiplies the current transformation matrix by mat\n\n Parameters\n ----------\n mat : Matrix_4x4 or array_like\n multiplier matrix or 3D vector\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed (if mat is a 4x4 matrix)\n \n Returns\n -------\n result : Matrix_4x4 | array_like\n Matrix_4x4 is returned if mat is a matrix (self is returned if in_place is True); \n a 3D vector is returned if mat is a vector\n '''\n if type(mat) == Matrix_4x4: \n mat = mat.m\n else:\n mat = np.asarray(mat) # conversion to array, if needed\n if mat.ndim == 1: # multiplication by 3D vector\n vec = np.append(mat,1) # conversion to 4D vector\n return np.matmul(self.m, vec)[0:3] # conversion to 3D vector\n\n if in_place:\n np.matmul(self.m, mat, self.m)\n return self\n else:\n return Matrix_4x4(np.matmul(self.m, mat))\n\n def __call__(self,mat, is_spherical=False):\n '''\n Multiplies the current transformation matrix by mat and returns a new matrix or vector\n\n Parameters\n ----------\n mat : Matrix_4x4 or array_like\n multiplier matrix or 3D vector\n is_spherical : bool\n only relevant if mat is a 3D vector, True if it uses spherical coordinates\n \n Returns\n -------\n result : Matrix_4x4 | array_like\n Matrix_4x4 is returned if mat is a matrix; \n a 3D vector is returned if mat is a vector\n '''\n\n if is_spherical and mat.ndim == 1: mat = M.deg_sph2cart(mat)\n return self.multiply(mat,False)" }, { "identifier": "Draw", "path": "world/commons/Draw.py", "snippet": "class Draw():\n _socket = None\n\n def __init__(self, is_enabled:bool, unum:int, host:str, port:int) -> None:\n self.enabled = is_enabled \n self._is_team_right = None\n self._unum = unum \n self._prefix = 
f'?{unum}_'.encode() # temporary prefix that should never be used in normal circumstances\n \n #Create one socket for all instances\n if Draw._socket is None:\n Draw._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM )\n Draw._socket.connect((host, port))\n Draw.clear_all()\n\n\n def set_team_side(self, is_right):\n ''' Called by world parser to switch side '''\n '''\n Generate an appropriate player ID\n RoboViz has a bug/feature: we send \"swap buffers for player: 'l_1' and RoboViz\n will swap every buffer that contains 'l_1' in the name, including \n 'l_10' and 'l_11'. To avoid that, we swap the separator to 'l-10', 'l-11'\n '''\n self._is_team_right = is_right\n self._prefix = f\"{'r' if is_right else 'l'}{'_' if self._unum < 10 else '-'}{self._unum}_\".encode() #e.g. b'l_5', b'l-10'\n\n\n @staticmethod\n def _send(msg, id, flush):\n ''' Private method to send message if RoboViz is accessible '''\n try:\n if flush:\n Draw._socket.send(msg + id + b'\\x00\\x00\\x00' + id + b'\\x00')\n else:\n Draw._socket.send(msg + id + b'\\x00')\n except ConnectionRefusedError:\n pass\n\n \n def circle(self, pos2d, radius, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw circle\n\n Examples\n ----------\n Circle in 2D (z=0): circle((-1,2), 3, 2, Draw.Color.red, \"my_circle\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos2d).any(), \"Argument 'pos2d' contains 'nan' values\"\n\n if self._is_team_right:\n pos2d = (-pos2d[0],-pos2d[1]) \n\n msg = b'\\x01\\x00' + (\n f'{f\"{pos2d[0] :.4f}\":.6s}'\n f'{f\"{pos2d[1] :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def line(self, p1, p2, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw line\n\n Examples\n ----------\n Line in 3D: line((0,0,0), (0,0,2), 3, Draw.Color.red, \"my_line\") \n Line in 2D (z=0): line((0,0), (0,1), 3, Draw.Color.red, \"my_line\") \n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(p1).any(), \"Argument 'p1' contains 'nan' values\"\n assert not np.isnan(p2).any(), \"Argument 'p2' contains 'nan' values\"\n\n z1 = p1[2] if len(p1)==3 else 0\n z2 = p2[2] if len(p2)==3 else 0\n\n if self._is_team_right: \n p1 = (-p1[0],-p1[1],p1[2]) if len(p1)==3 else (-p1[0],-p1[1])\n p2 = (-p2[0],-p2[1],p2[2]) if len(p2)==3 else (-p2[0],-p2[1])\n\n msg = b'\\x01\\x01' + (\n f'{f\"{p1[0] :.4f}\":.6s}'\n f'{f\"{p1[1] :.4f}\":.6s}'\n f'{f\"{z1 :.4f}\":.6s}'\n f'{f\"{p2[0] :.4f}\":.6s}'\n f'{f\"{p2[1] :.4f}\":.6s}'\n f'{f\"{z2 :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n\n Draw._send(msg, self._prefix + id.encode(), flush)\n \n\n def point(self, pos, size, color:bytes, id:str, flush=True):\n ''' \n Draw point\n\n Examples\n ----------\n Point in 3D: point((1,1,1), 3, Draw.Color.red, \"my_point\")\n Point in 2D (z=0): point((1,1), 3, Draw.Color.red, \"my_point\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x02' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{size :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def sphere(self, pos, radius, color:bytes, id:str, flush=True):\n ''' \n Draw sphere\n\n Examples\n ----------\n Sphere in 3D: sphere((1,1,1), 3, Draw.Color.red, \"my_sphere\")\n Sphere in 2D (z=0): sphere((1,1), 3, Draw.Color.red, \"my_sphere\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x03' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def polygon(self, vertices, color:bytes, alpha:int, id:str, flush=True):\n ''' \n Draw polygon\n\n Examples\n ----------\n Polygon in 3D: polygon(((0,0,0),(1,0,0),(0,1,0)), Draw.Color.red, 255, \"my_polygon\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert 0<=alpha<=255, \"The alpha channel (degree of opacity) must be in range [0,255]\"\n\n if self._is_team_right: \n vertices = [(-v[0],-v[1],v[2]) for v in vertices]\n\n msg = b'\\x01\\x04' + bytes([len(vertices)]) + color + alpha.to_bytes(1,'big')\n\n for v in vertices:\n msg += (\n f'{f\"{v[0] :.4f}\":.6s}'\n f'{f\"{v[1] :.4f}\":.6s}'\n f'{f\"{v[2] :.4f}\":.6s}').encode()\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def annotation(self, pos, text, color:bytes, id:str, flush=True):\n ''' \n Draw annotation\n\n Examples\n ----------\n Annotation in 3D: annotation((1,1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n Annotation in 2D (z=0): annotation((1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n '''\n if not self.enabled: return\n if type(text) != bytes: text = str(text).encode()\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x02\\x00' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}').encode() + color + text + b'\\x00'\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n \n def arrow(self, p1, p2, arrowhead_size, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw arrow\n\n Examples\n ----------\n Arrow in 3D: arrow((0,0,0), (0,0,2), 0.1, 3, Draw.Color.red, \"my_arrow\")\n Arrow in 2D (z=0): arrow((0,0), (0,1), 0.1, 3, Draw.Color.red, \"my_arrow\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n\n # No need to invert sides, the called shapes will handle that\n if len(p1)==2: p1 = M.to_3d(p1) \n else: p1 = np.asarray(p1)\n if len(p2)==2: p2 = M.to_3d(p2) \n else: p2 = np.asarray(p2)\n\n vec = p2-p1\n vec_size = np.linalg.norm(vec)\n if vec_size == 0: return #return without warning/error\n if arrowhead_size > vec_size: arrowhead_size = vec_size\n\n ground_proj_perpendicular = np.array([ vec[1], -vec[0], 0 ])\n\n if np.all(ground_proj_perpendicular == 0): #vertical arrow\n ground_proj_perpendicular = np.array([ arrowhead_size/2, 0, 0 ])\n else:\n ground_proj_perpendicular *= arrowhead_size/2 / np.linalg.norm(ground_proj_perpendicular)\n\n head_start = p2 - vec * (arrowhead_size/vec_size)\n head_pt1 = head_start + ground_proj_perpendicular\n head_pt2 = head_start - ground_proj_perpendicular\n\n self.line(p1,p2,thickness,color,id,False)\n self.line(p2,head_pt1,thickness,color,id,False)\n self.line(p2,head_pt2,thickness,color,id,flush)\n\n\n def flush(self, id):\n ''' Flush specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), False)\n\n def clear(self, id):\n ''' Clear specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), True) #swap buffer twice\n\n\n def clear_player(self):\n ''' Clear all drawings made by this player '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix, True) #swap buffer twice\n\n\n @staticmethod\n def clear_all():\n ''' Clear all drawings of all players '''\n if Draw._socket is not None:\n Draw._send(b'\\x00\\x00\\x00\\x00\\x00',b'',False) #swap buffer twice using no id\n\n\n class Color():\n '''\n Based on X11 colors\n The names are restructured to make better suggestions\n '''\n pink_violet = b'\\xC7\\x15\\x85'\n pink_hot = b'\\xFF\\x14\\x93'\n pink_violet_pale = b'\\xDB\\x70\\x93'\n pink = b'\\xFF\\x69\\xB4'\n pink_pale = b'\\xFF\\xB6\\xC1'\n \n red_dark = b'\\x8B\\x00\\x00'\n red = b'\\xFF\\x00\\x00'\n red_brick = b'\\xB2\\x22\\x22'\n red_crimson = b'\\xDC\\x14\\x3C'\n red_indian = b'\\xCD\\x5C\\x5C'\n red_salmon = b'\\xFA\\x80\\x72'\n\n orange_red = b'\\xFF\\x45\\x00'\n orange = b'\\xFF\\x8C\\x00'\n orange_ligth = b'\\xFF\\xA5\\x00'\n\n yellow_gold = b'\\xFF\\xD7\\x00'\n yellow = b'\\xFF\\xFF\\x00'\n yellow_light = b'\\xBD\\xB7\\x6B'\n\n brown_maroon =b'\\x80\\x00\\x00'\n brown_dark = b'\\x8B\\x45\\x13'\n brown = b'\\xA0\\x52\\x2D'\n brown_gold = b'\\xB8\\x86\\x0B'\n brown_light = b'\\xCD\\x85\\x3F'\n brown_pale = b'\\xDE\\xB8\\x87'\n\n green_dark = b'\\x00\\x64\\x00' \n green = b'\\x00\\x80\\x00' \n green_lime = b'\\x32\\xCD\\x32' \n green_light = b'\\x00\\xFF\\x00' \n green_lawn = b'\\x7C\\xFC\\x00' \n green_pale = b'\\x90\\xEE\\x90' \n\n cyan_dark = b'\\x00\\x80\\x80' \n cyan_medium = b'\\x00\\xCE\\xD1' \n cyan = b'\\x00\\xFF\\xFF' \n cyan_light = b'\\xAF\\xEE\\xEE'\n\n blue_dark = b'\\x00\\x00\\x8B' \n blue = b'\\x00\\x00\\xFF' \n blue_royal = b'\\x41\\x69\\xE1' \n blue_medium = b'\\x1E\\x90\\xFF' \n blue_light = b'\\x00\\xBF\\xFF'\n blue_pale = b'\\x87\\xCE\\xEB'\n\n purple_violet = b'\\x94\\x00\\xD3' \n purple_magenta = b'\\xFF\\x00\\xFF' \n purple_light = b'\\xBA\\x55\\xD3' \n purple_pale = b'\\xDD\\xA0\\xDD'\n\n white = b'\\xFF\\xFF\\xFF'\n gray_10 = b'\\xE6\\xE6\\xE6'\n gray_20 = b'\\xCC\\xCC\\xCC'\n gray_30 = b'\\xB2\\xB2\\xB2' \n gray_40 = b'\\x99\\x99\\x99'\n gray_50 = b'\\x80\\x80\\x80'\n gray_60 = b'\\x66\\x66\\x66'\n gray_70 = b'\\x4C\\x4C\\x4C'\n gray_80 = 
b'\\x33\\x33\\x33'\n gray_90 = b'\\x1A\\x1A\\x1A'\n black = b'\\x00\\x00\\x00' \n\n @staticmethod\n def get(r,g,b):\n ''' Get RGB color (0-255) '''\n return bytes([int(r),int(g),int(b)])" }, { "identifier": "Other_Robot", "path": "world/commons/Other_Robot.py", "snippet": "class Other_Robot():\n def __init__(self, unum, is_teammate) -> None:\n self.unum = unum # convenient variable to indicate uniform number (same as other robot's index + 1)\n self.is_self = False # convenient flag to indicate if this robot is self\n self.is_teammate = is_teammate # convenient variable to indicate if this robot is from our team\n self.is_visible = False # True if this robot was seen in the last message from the server (it doesn't mean we know its absolute location)\n self.body_parts_cart_rel_pos = dict() # cartesian relative position of the robot's visible body parts\n self.body_parts_sph_rel_pos = dict() # spherical relative position of the robot's visible body parts\n self.vel_filter = 0.3 # EMA filter coefficient applied to self.state_filtered_velocity\n self.vel_decay = 0.95 # velocity decay at every vision cycle (neutralized if velocity is updated)\n\n\n # State variables: these are computed when this robot is visible and when the original robot is able to self-locate\n self.state_fallen = False # true if the robot is lying down (updated when head is visible)\n self.state_last_update = 0 # World.time_local_ms when the state was last updated\n self.state_horizontal_dist = 0 # horizontal head distance if head is visible, otherwise, average horizontal distance of visible body parts (the distance is updated by vision or radio when state_abs_pos gets a new value, but also when the other player is not visible, by assuming its last position)\n self.state_abs_pos = None # 3D head position if head is visible, otherwise, 2D average position of visible body parts, or, 2D radio head position\n self.state_orientation = 0 # orientation based on pair of lower arms or feet, or average of both (WARNING: may be older than state_last_update) \n self.state_ground_area = None # (pt_2d,radius) projection of player area on ground (circle), not precise if farther than 3m (for performance), useful for obstacle avoidance when it falls\n self.state_body_parts_abs_pos = dict() # 3D absolute position of each body part\n self.state_filtered_velocity = np.zeros(3) # 3D filtered velocity (m/s) (if the head is not visible, the 2D part is updated and v.z decays)" }, { "identifier": "Robot", "path": "world/Robot.py", "snippet": "class Robot():\n STEPTIME = 0.02 # Fixed step time\n VISUALSTEP = 0.04 # Fixed visual step time\n SQ_STEPTIME = STEPTIME * STEPTIME\n GRAVITY = np.array([0,0,-9.81])\n IMU_DECAY = 0.996 #IMU's velocity decay\n \n #------------------ constants to force symmetry in joints/effectors\n\n MAP_PERCEPTOR_TO_INDEX = {\"hj1\":0, \"hj2\":1, \"llj1\":2, \"rlj1\":3,\n \"llj2\":4, \"rlj2\":5, \"llj3\":6, \"rlj3\":7,\n \"llj4\":8, \"rlj4\":9, \"llj5\":10,\"rlj5\":11,\n \"llj6\":12,\"rlj6\":13,\"laj1\":14,\"raj1\":15,\n \"laj2\":16,\"raj2\":17,\"laj3\":18,\"raj3\":19,\n \"laj4\":20,\"raj4\":21,\"llj7\":22,\"rlj7\":23 }\n\n # Fix symmetry issues 1a/4 (identification) \n FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'}\n FIX_INDICES_LIST = [5,13,17,18,20]\n\n # Recommended height for unofficial beam (near ground)\n BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4]\n\n\n def __init__(self, unum:int, robot_type:int) -> None:\n robot_xml = \"nao\"+str(robot_type)+\".xml\" # Typical NAO file name\n self.type = robot_type\n 
self.beam_height = Robot.BEAM_HEIGHTS[robot_type]\n self.no_of_joints = 24 if robot_type == 4 else 22 \n\n #Fix symmetry issues 1b/4 (identification) \n self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints)\n self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1\n\n self.body_parts = dict() # keys='body part names' (given by the robot's XML), values='Body_Part objects'\n self.unum = unum # Robot's uniform number\n self.gyro = np.zeros(3) # Angular velocity along the three axes of freedom of the robot's torso (deg/s)\n self.acc = np.zeros(3) # Proper acceleration along the three axes of freedom of the robot's torso (m/s2)\n self.frp = dict() # foot \"lf\"/\"rf\", toe \"lf1\"/\"rf1\" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {\"lf\":(px,py,pz,fx,fy,fz)}\n self.feet_toes_last_touch = {\"lf\":0,\"rf\":0,\"lf1\":0,\"rf1\":0} # foot \"lf\"/\"rf\", toe \"lf1\"/\"rf1\" World.time_local_ms when foot/toe last touched any surface\n self.feet_toes_are_touching = {\"lf\":False,\"rf\":False,\"lf1\":False,\"rf1\":False} # foot \"lf\"/\"rf\", toe \"lf1\"/\"rf1\" True if touching in last received server message\n self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies\n self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m)\n\n # Joint variables are optimized for performance / array operations\n self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg)\n self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s)\n self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)\n self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)\n self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info)\n self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix\n\n # Localization variables relative to head\n self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field\n self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head\n self.loc_rotation_head_to_field = Matrix_3x3() # Rotation matrix from head to field\n self.loc_rotation_field_to_head = Matrix_3x3() # Rotation matrix from field to head\n self.loc_head_position = np.zeros(3) # Absolute head position (m)\n self.loc_head_position_history = deque(maxlen=40)# Absolute head position history (queue with up to 40 old positions at intervals of 0.04s, where index 0 is the previous position)\n self.loc_head_velocity = np.zeros(3) # Absolute head velocity (m/s) (Warning: possibly noisy)\n self.loc_head_orientation = 0 # Head orientation (deg)\n self.loc_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible\n self.loc_last_update = 0 # World.time_local_ms when the localization was last updated\n self.loc_head_position_last_update = 0 # World.time_local_ms when loc_head_position was last updated by vision or radio\n self.radio_fallen_state = False # True if (radio says we fell) and (radio is significantly more recent than loc)\n self.radio_last_update = 0 # World.time_local_ms when radio_fallen_state was last updated (and possibly loc_head_position)\n\n # Localization variables relative to torso\n 
self.loc_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field \n self.loc_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field\n self.loc_torso_roll = 0 # Torso roll (deg)\n self.loc_torso_pitch = 0 # Torso pitch (deg) \n self.loc_torso_orientation = 0 # Torso orientation (deg)\n self.loc_torso_inclination = 0 # Torso inclination (deg) (inclination of z-axis in relation to field z-axis)\n self.loc_torso_position = np.zeros(3) # Absolute torso position (m)\n self.loc_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s)\n self.loc_torso_acceleration = np.zeros(3) # Absolute Coordinate acceleration (m/s2)\n\n # Other localization variables\n self.cheat_abs_pos = np.zeros(3) # Absolute head position provided by the server as cheat (m)\n self.cheat_ori = 0.0 # Absolute head orientation provided by the server as cheat (deg)\n self.loc_CoM_position = np.zeros(3) # Absolute CoM position (m)\n self.loc_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s)\n\n # Localization special variables\n '''\n self.loc_head_z is often equivalent to self.loc_head_position[2], but sometimes it differs.\n There are situations in which the rotation and translation cannot be computed, \n but the z-coordinate can still be found through vision, in which case:\n self.loc_is_up_to_date is False\n self.loc_head_z_is_up_to_date is True\n It should be used in applications which rely on z as an independent coordinate, such\n as detecting if the robot has fallen, or as an observation for machine learning.\n It should NEVER be used for 3D transformations.\n '''\n self.loc_head_z = 0 # Absolute head position (z) - see above for explanation (m)\n self.loc_head_z_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible\n self.loc_head_z_last_update = 0 # World.time_local_ms when loc_head_z was last computed\n self.loc_head_z_vel = 0 # Absolute head velocity (z) (m/s)\n\n # Localization + Gyroscope\n # These variables are reliable. 
The gyroscope is used to update the rotation when waiting for the next visual cycle\n self.imu_torso_roll = 0 # Torso roll (deg) (src: Localization + Gyro)\n self.imu_torso_pitch = 0 # Torso pitch (deg) (src: Localization + Gyro)\n self.imu_torso_orientation = 0 # Torso orientation (deg) (src: Localization + Gyro)\n self.imu_torso_inclination = 0 # Torso inclination (deg) (src: Localization + Gyro)\n self.imu_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field (src: Localization + Gyro)\n self.imu_last_visual_update = 0 # World.time_local_ms when the IMU data was last updated with visual information \n\n # Localization + Gyroscope + Accelerometer\n # Warning: these variables are unreliable, since small errors in the Localization Orientation lead to \n # wrong acceleration -> wrong velocity -> wrong position\n self.imu_weak_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field (src: Localization + Gyro + Acc)\n self.imu_weak_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field (src: Localization + Gyro + Acc)\n self.imu_weak_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head (src: Localization + Gyro + Acc)\n self.imu_weak_torso_position = np.zeros(3) # Absolute torso position (m) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_acceleration = np.zeros(3) # Absolute torso acceleration (m/s2) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_next_position = np.zeros(3) # Absolute position in next step estimate (m) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_next_velocity = np.zeros(3) # Absolute velocity in next step estimate (m/s) (src: Localization + Gyro + Acc)\n self.imu_weak_CoM_position = np.zeros(3) # Absolute CoM position (m) (src: Localization + Gyro + Acc)\n self.imu_weak_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s) (src: Localization + Gyro + Acc)\n\n\n #Using explicit variables to enable IDE suggestions\n self.J_HEAD_YAW = 0\n self.J_HEAD_PITCH = 1\n self.J_LLEG_YAW_PITCH = 2\n self.J_RLEG_YAW_PITCH = 3\n self.J_LLEG_ROLL = 4\n self.J_RLEG_ROLL = 5\n self.J_LLEG_PITCH = 6\n self.J_RLEG_PITCH = 7\n self.J_LKNEE = 8\n self.J_RKNEE = 9\n self.J_LFOOT_PITCH = 10\n self.J_RFOOT_PITCH = 11\n self.J_LFOOT_ROLL = 12\n self.J_RFOOT_ROLL = 13\n self.J_LARM_PITCH = 14\n self.J_RARM_PITCH = 15\n self.J_LARM_ROLL = 16\n self.J_RARM_ROLL = 17\n self.J_LELBOW_YAW = 18\n self.J_RELBOW_YAW = 19\n self.J_LELBOW_ROLL = 20\n self.J_RELBOW_ROLL = 21\n self.J_LTOE_PITCH = 22\n self.J_RTOE_PITCH = 23\n\n\n #------------------ parse robot xml\n\n dir = M.get_active_directory(\"/world/commons/robots/\")\n robot_xml_root = xmlp.parse(dir + robot_xml).getroot()\n\n joint_no = 0\n for child in robot_xml_root:\n if child.tag == \"bodypart\":\n self.body_parts[child.attrib['name']] = Body_Part(child.attrib['mass'])\n elif child.tag == \"joint\":\n self.joints_info[joint_no] = Joint_Info(child)\n self.joints_position[joint_no] = 0.0\n ji = self.joints_info[joint_no]\n\n #save joint if body part is 1st anchor (to simplify model traversal in a single direction)\n self.body_parts[ji.anchor0_part].joints.append(Robot.MAP_PERCEPTOR_TO_INDEX[ji.perceptor]) \n\n joint_no += 1\n if joint_no == self.no_of_joints: break #ignore extra joints\n\n else:\n raise NotImplementedError\n\n assert joint_no == self.no_of_joints, \"The Robot XML and the robot type don't 
match!\"\n\n\n def get_head_abs_vel(self, history_steps:int):\n '''\n Get robot's head absolute velocity (m/s)\n\n Parameters\n ----------\n history_steps : int\n number of history steps to consider [1,40]\n\n Examples\n --------\n get_head_abs_vel(1) is equivalent to (current abs pos - last abs pos) / 0.04\n get_head_abs_vel(2) is equivalent to (current abs pos - abs pos 0.08s ago) / 0.08\n get_head_abs_vel(3) is equivalent to (current abs pos - abs pos 0.12s ago) / 0.12\n '''\n assert 1 <= history_steps <= 40, \"Argument 'history_steps' must be in range [1,40]\"\n\n if len(self.loc_head_position_history) == 0:\n return np.zeros(3)\n\n h_step = min(history_steps, len(self.loc_head_position_history))\n t = h_step * Robot.VISUALSTEP\n\n return (self.loc_head_position - self.loc_head_position_history[h_step-1]) / t\n \n\n def _initialize_kinematics(self):\n\n #starting with head\n parts={\"head\"}\n sequential_body_parts = [\"head\"]\n\n while len(parts) > 0:\n part = parts.pop()\n\n for j in self.body_parts[part].joints:\n\n p = self.joints_info[j].anchor1_part\n\n if len(self.body_parts[p].joints) > 0: #add body part if it is the 1st anchor of some joint\n parts.add(p)\n sequential_body_parts.append(p)\n\n self.fwd_kinematics_list = [(self.body_parts[part],j, self.body_parts[self.joints_info[j].anchor1_part] ) \n for part in sequential_body_parts for j in self.body_parts[part].joints]\n\n #Fix symmetry issues 4/4 (kinematics)\n for i in Robot.FIX_INDICES_LIST:\n self.joints_info[i].axes *= -1\n aux = self.joints_info[i].min\n self.joints_info[i].min = -self.joints_info[i].max\n self.joints_info[i].max = -aux\n\n\n def update_localization(self, localization_raw, time_local_ms): \n\n # parse raw data\n loc = localization_raw.astype(float) #32bits to 64bits for consistency\n self.loc_is_up_to_date = bool(loc[32])\n self.loc_head_z_is_up_to_date = bool(loc[34])\n\n if self.loc_head_z_is_up_to_date:\n time_diff = (time_local_ms - self.loc_head_z_last_update) / 1000 \n self.loc_head_z_vel = (loc[33] - self.loc_head_z) / time_diff\n self.loc_head_z = loc[33]\n self.loc_head_z_last_update = time_local_ms\n\n # Save last position to history at every vision cycle (even if not up to date) (update_localization is only called at vision cycles)\n self.loc_head_position_history.appendleft(np.copy(self.loc_head_position))\n\n if self.loc_is_up_to_date:\n time_diff = (time_local_ms - self.loc_last_update) / 1000\n self.loc_last_update = time_local_ms\n self.loc_head_to_field_transform.m[:] = loc[0:16].reshape((4,4))\n self.loc_field_to_head_transform.m[:] = loc[16:32].reshape((4,4))\n \n # extract data (related to the robot's head)\n self.loc_rotation_head_to_field = self.loc_head_to_field_transform.get_rotation()\n self.loc_rotation_field_to_head = self.loc_field_to_head_transform.get_rotation()\n p = self.loc_head_to_field_transform.get_translation()\n self.loc_head_velocity = (p - self.loc_head_position) / time_diff\n self.loc_head_position = p\n self.loc_head_position_last_update = time_local_ms\n self.loc_head_orientation = self.loc_head_to_field_transform.get_yaw_deg()\n self.radio_fallen_state = False\n\n # extract data (related to the center of mass)\n p = self.loc_head_to_field_transform(self.rel_cart_CoM_position)\n self.loc_CoM_velocity = (p - self.loc_CoM_position) / time_diff\n self.loc_CoM_position = p\n\n # extract data (related to the robot's torso)\n t = self.get_body_part_to_field_transform('torso')\n self.loc_torso_to_field_transform = t\n self.loc_torso_to_field_rotation = 
t.get_rotation()\n self.loc_torso_orientation = t.get_yaw_deg()\n self.loc_torso_pitch = t.get_pitch_deg()\n self.loc_torso_roll = t.get_roll_deg()\n self.loc_torso_inclination = t.get_inclination_deg()\n p = t.get_translation()\n self.loc_torso_velocity = (p - self.loc_torso_position) / time_diff\n self.loc_torso_position = p\n self.loc_torso_acceleration = self.loc_torso_to_field_rotation.multiply(self.acc) + Robot.GRAVITY\n\n\n def head_to_body_part_transform(self, body_part_name, coords, is_batch=False):\n '''\n If coord is a vector or list of vectors:\n Convert cartesian coordinates that are relative to head to coordinates that are relative to a body part \n\n If coord is a Matrix_4x4 or a list of Matrix_4x4:\n Convert pose that is relative to head to a pose that is relative to a body part \n \n Parameters\n ----------\n body_part_name : `str`\n name of body part (given by the robot's XML)\n coords : array_like\n One 3D position or list of 3D positions\n is_batch : `bool`\n Indicates if coords is a batch of 3D positions\n\n Returns\n -------\n coord : `list` or ndarray\n A numpy array is returned if is_batch is False, otherwise, a list of arrays is returned\n '''\n head_to_bp_transform : Matrix_4x4 = self.body_parts[body_part_name].transform.invert()\n \n if is_batch:\n return [head_to_bp_transform(c) for c in coords]\n else:\n return head_to_bp_transform(coords)\n\n\n\n def get_body_part_to_field_transform(self, body_part_name) -> Matrix_4x4:\n '''\n Computes the transformation matrix from body part to field, from which we can extract its absolute position and rotation.\n For best results, use this method when self.loc_is_up_to_date is True. Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.loc_head_to_field_transform.multiply(self.body_parts[body_part_name].transform)\n\n def get_body_part_abs_position(self, body_part_name) -> np.ndarray:\n '''\n Computes the absolute position of a body part considering the localization data and forward kinematics.\n For best results, use this method when self.loc_is_up_to_date is True. Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.get_body_part_to_field_transform(body_part_name).get_translation()\n\n def get_joint_to_field_transform(self, joint_index) -> Matrix_4x4:\n '''\n Computes the transformation matrix from joint to field, from which we can extract its absolute position and rotation.\n For best results, use this method when self.loc_is_up_to_date is True. Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.loc_head_to_field_transform.multiply(self.joints_transform[joint_index])\n\n def get_joint_abs_position(self, joint_index) -> np.ndarray:\n '''\n Computes the absolute position of a joint considering the localization data and forward kinematics.\n For best results, use this method when self.loc_is_up_to_date is True. 
Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.get_joint_to_field_transform(joint_index).get_translation()\n\n def update_pose(self):\n\n if self.fwd_kinematics_list is None:\n self._initialize_kinematics()\n\n for body_part, j, child_body_part in self.fwd_kinematics_list:\n ji = self.joints_info[j]\n self.joints_transform[j].m[:] = body_part.transform.m\n self.joints_transform[j].translate(ji.anchor0_axes, True)\n child_body_part.transform.m[:] = self.joints_transform[j].m\n child_body_part.transform.rotate_deg(ji.axes, self.joints_position[j], True)\n child_body_part.transform.translate(ji.anchor1_axes_neg, True)\n\n self.rel_cart_CoM_position = np.average([b.transform.get_translation() for b in self.body_parts.values()], 0,\n [b.mass for b in self.body_parts.values()])\n\n\n def update_imu(self, time_local_ms):\n\n # update IMU\n if self.loc_is_up_to_date:\n self.imu_torso_roll = self.loc_torso_roll\n self.imu_torso_pitch = self.loc_torso_pitch \n self.imu_torso_orientation = self.loc_torso_orientation\n self.imu_torso_inclination = self.loc_torso_inclination\n self.imu_torso_to_field_rotation.m[:] = self.loc_torso_to_field_rotation.m\n self.imu_weak_torso_to_field_transform.m[:] = self.loc_torso_to_field_transform.m\n self.imu_weak_head_to_field_transform.m[:] = self.loc_head_to_field_transform.m\n self.imu_weak_field_to_head_transform.m[:] = self.loc_field_to_head_transform.m\n self.imu_weak_torso_position[:] = self.loc_torso_position\n self.imu_weak_torso_velocity[:] = self.loc_torso_velocity\n self.imu_weak_torso_acceleration[:] = self.loc_torso_acceleration\n self.imu_weak_torso_next_position = self.loc_torso_position + self.loc_torso_velocity * Robot.STEPTIME + self.loc_torso_acceleration * (0.5 * Robot.SQ_STEPTIME)\n self.imu_weak_torso_next_velocity = self.loc_torso_velocity + self.loc_torso_acceleration * Robot.STEPTIME\n self.imu_weak_CoM_position[:] = self.loc_CoM_position\n self.imu_weak_CoM_velocity[:] = self.loc_CoM_velocity\n self.imu_last_visual_update = time_local_ms\n else:\n g = self.gyro / 50 # convert degrees per second to degrees per step\n\n self.imu_torso_to_field_rotation.multiply( Matrix_3x3.from_rotation_deg(g), in_place=True, reverse_order=True)\n\n self.imu_torso_orientation = self.imu_torso_to_field_rotation.get_yaw_deg()\n self.imu_torso_pitch = self.imu_torso_to_field_rotation.get_pitch_deg()\n self.imu_torso_roll = self.imu_torso_to_field_rotation.get_roll_deg()\n\n self.imu_torso_inclination = atan(sqrt(tan(self.imu_torso_roll/180*pi)**2+tan(self.imu_torso_pitch/180*pi)**2))*180/pi\n\n # Update position and velocity until 0.2 seconds has passed since last visual update\n if time_local_ms < self.imu_last_visual_update + 200:\n self.imu_weak_torso_position[:] = self.imu_weak_torso_next_position\n if self.imu_weak_torso_position[2] < 0: self.imu_weak_torso_position[2] = 0 # limit z coordinate to positive values\n self.imu_weak_torso_velocity[:] = self.imu_weak_torso_next_velocity * Robot.IMU_DECAY # stability tradeoff\n else:\n self.imu_weak_torso_velocity *= 0.97 # without visual updates for 0.2s, the position is locked, and the velocity decays to zero\n\n # convert proper acceleration to coordinate acceleration and fix rounding bias\n self.imu_weak_torso_acceleration = self.imu_torso_to_field_rotation.multiply(self.acc) + Robot.GRAVITY\n self.imu_weak_torso_to_field_transform = 
Matrix_4x4.from_3x3_and_translation(self.imu_torso_to_field_rotation,self.imu_weak_torso_position)\n self.imu_weak_head_to_field_transform = self.imu_weak_torso_to_field_transform.multiply(self.body_parts[\"torso\"].transform.invert())\n self.imu_weak_field_to_head_transform = self.imu_weak_head_to_field_transform.invert()\n p = self.imu_weak_head_to_field_transform(self.rel_cart_CoM_position)\n self.imu_weak_CoM_velocity = (p-self.imu_weak_CoM_position)/Robot.STEPTIME\n self.imu_weak_CoM_position = p\n\n # Next Position = x0 + v0*t + 0.5*a*t^2, Next velocity = v0 + a*t\n self.imu_weak_torso_next_position = self.imu_weak_torso_position + self.imu_weak_torso_velocity * Robot.STEPTIME + self.imu_weak_torso_acceleration * (0.5 * Robot.SQ_STEPTIME)\n self.imu_weak_torso_next_velocity = self.imu_weak_torso_velocity + self.imu_weak_torso_acceleration * Robot.STEPTIME\n\n\n\n def set_joints_target_position_direct(self,indices,values:np.ndarray,harmonize=True,max_speed=7.03,tolerance=0.012,limit_joints=True) -> int:\n '''\n Computes the speed of a list of joints, taking as argument the target position\n\n Parameters\n ----------\n indices : `int`/`list`/`slice`/numpy array\n joint indices\n values : numpy array \n target position for each listed joint index\n harmonize : `bool`\n if True, all joints reach target at same time\n max_speed : `float`\n max. speed for all joints in deg/step\n Most joints have a maximum speed of 351.77 deg/s according to rcssserver3d/data/rsg/agent/nao/hingejoint.rsg\n That translates as 7.0354 deg/step or 6.1395 rad/s\n tolerance : `float`\n angle error tolerance (in degrees) to return that target was reached (returns -1)\n limit_joints : `bool`\n limit values to the joints' range of motion\n\n Returns\n -------\n remaining_steps : `int`\n predicted number of remaining steps or -1 if target was already reached\n\n Examples\n -------\n (let p[tx] be the joint position at t=x)\n\n Example for return value: moving joint[0] from 0deg to 10deg\n pos[t0]: 0, speed[t0]: 7deg/step, ret=2 # target will predictedly be reached in 2 steps\n pos[t1]: 7, speed[t1]: 3deg/step, ret=1 # target will predictedly be reached in 1 step (send final action)\n pos[t2]: 10, speed[t2]: 0deg/step, ret=0 # target was predictedly already reached \n pos[t3]: 10, speed[t3]: 0deg/step, ret=-1 # (best case scenario) server reported with delay, that target was reached (see tolerance)\n pos[t?]: 10, speed[t?]: 0deg/step, ret=-1 # if there is friction, it may take some additional steps \n\n If everything worked as predicted we could stop calling this function when ret==1\n If we need precision, it is recommended to wait for ret==-1\n\n Example 1:\n set_joints_target_position_direct(range(2,4),np.array([10.0,5.0]),harmonize=True) \n Joint[2] p[t0]: 0 target pos: 10 -> p[t1]=5, p[t2]=10\n Joint[3] p[t0]: 0 target pos: 5 -> p[t1]=2.5, p[t2]=5\n\n Example 2:\n set_joints_target_position_direct([2,3],np.array([10.0,5.0]),harmonize=False) \n Joint[2] p[t0]: 0 target pos: 10 -> p[t1]=7, p[t2]=10\n Joint[3] p[t0]: 0 target pos: 5 -> p[t1]=5, p[t2]=5 \n '''\n\n assert type(values) == np.ndarray, \"'values' argument must be a numpy array\"\n np.nan_to_num(values, copy=False) # Replace NaN with zero and infinity with large finite numbers\n\n # limit range of joints\n if limit_joints: \n if type(indices) == list or type(indices) == np.ndarray:\n for i in range(len(indices)):\n values[i] = np.clip(values[i], self.joints_info[indices[i]].min, self.joints_info[indices[i]].max)\n elif type(indices) == slice:\n info = 
self.joints_info[indices]\n for i in range(len(info)):\n values[i] = np.clip(values[i], info[i].min, info[i].max)\n else: # int\n values[0] = np.clip(values[0], self.joints_info[indices].min, self.joints_info[indices].max)\n\n #predicted_diff: predicted difference between reported position and actual position\n\n predicted_diff = self.joints_target_last_speed[indices] * 1.1459156 #rad/s to deg/step\n predicted_diff = np.asarray(predicted_diff)\n np.clip(predicted_diff,-7.03,7.03,out=predicted_diff) #saturate predicted movement in-place\n\n #reported_dist: difference between reported position and target position\n\n reported_dist = values - self.joints_position[indices]\n if np.all((np.abs(reported_dist) < tolerance)) and np.all((np.abs(predicted_diff) < tolerance)):\n self.joints_target_speed[indices] = 0\n return -1\n \n deg_per_step = reported_dist - predicted_diff\n\n relative_max = np.max( np.abs(deg_per_step) ) / max_speed\n remaining_steps = np.ceil( relative_max )\n\n if remaining_steps == 0:\n self.joints_target_speed[indices] = 0\n return 0\n\n if harmonize: \n deg_per_step /= remaining_steps\n else:\n np.clip(deg_per_step,-max_speed,max_speed,out=deg_per_step) #limit maximum speed\n\n self.joints_target_speed[indices] = deg_per_step * 0.87266463 #convert to rad/s\n\n return remaining_steps\n\n\n\n def get_command(self) -> bytes:\n '''\n Builds commands string from self.joints_target_speed\n '''\n j_speed = self.joints_target_speed * self.FIX_EFFECTOR_MASK #Fix symmetry issues 3/4 (effectors)\n cmd = \"\".join(f\"({self.joints_info[i].effector} {j_speed[i]:.5f})\" for i in range(self.no_of_joints)).encode('utf-8')\n\n self.joints_target_last_speed = self.joints_target_speed #1. both point to the same array\n self.joints_target_speed = np.zeros_like(self.joints_target_speed) #2. create new array for joints_target_speed\n return cmd" } ]
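The `set_joints_target_position_direct` snippet above drives each joint by subtracting the movement already commanded but not yet reported (`predicted_diff`) from the remaining distance to the target, then either harmonizing all joints so they arrive together or clamping each to `max_speed`. A minimal NumPy sketch of that core computation, detached from the `Robot` class and with the joint-limit clipping omitted, might look like this (the two constants are the deg/step and rad/s conversions used in the snippet):

```python
import numpy as np

# deg/step <-> rad/s conversions at the 0.02 s step time used in the snippets
RAD_S_TO_DEG_STEP = 1.1459156
DEG_STEP_TO_RAD_S = 0.87266463

def joint_speeds(target_deg, reported_deg, last_speed_rad_s,
                 harmonize=True, max_speed=7.03, tolerance=0.012):
    """Per-joint speed command (rad/s) plus the predicted remaining steps.

    Mirrors the core of set_joints_target_position_direct: subtract the
    movement already commanded but not yet reported, then spread the rest
    over the predicted number of steps.
    """
    # movement already "in flight", saturated to the physical per-step limit
    predicted = np.clip(last_speed_rad_s * RAD_S_TO_DEG_STEP, -7.03, 7.03)
    reported_dist = target_deg - reported_deg

    if np.all(np.abs(reported_dist) < tolerance) and np.all(np.abs(predicted) < tolerance):
        return np.zeros_like(target_deg), -1           # target already reached

    deg_per_step = reported_dist - predicted
    remaining_steps = int(np.ceil(np.max(np.abs(deg_per_step)) / max_speed))
    if remaining_steps == 0:
        return np.zeros_like(target_deg), 0

    if harmonize:
        deg_per_step = deg_per_step / remaining_steps  # all joints arrive together
    else:
        deg_per_step = np.clip(deg_per_step, -max_speed, max_speed)

    return deg_per_step * DEG_STEP_TO_RAD_S, remaining_steps

# Reproduces Example 1 from the docstring above: both joints arrive in 2 steps
speeds, steps = joint_speeds(np.array([10.0, 5.0]), np.zeros(2), np.zeros(2))
print(steps)                        # 2
print(speeds / DEG_STEP_TO_RAD_S)   # [5.  2.5] deg per step
```

With `harmonize=False`, each joint is instead clamped independently to `max_speed`, which reproduces Example 2 from the same docstring.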
from collections import deque from cpp.ball_predictor import ball_predictor from cpp.localization import localization from logs.Logger import Logger from math import atan2, pi from math_ops.Matrix_4x4 import Matrix_4x4 from world.commons.Draw import Draw from world.commons.Other_Robot import Other_Robot from world.Robot import Robot import numpy as np
17,861
class World(): STEPTIME = 0.02 # Fixed step time STEPTIME_MS = 20 # Fixed step time in milliseconds VISUALSTEP = 0.04 # Fixed visual step time VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds # play modes in our favor M_OUR_KICKOFF = 0 M_OUR_KICK_IN = 1 M_OUR_CORNER_KICK = 2 M_OUR_GOAL_KICK = 3 M_OUR_FREE_KICK = 4 M_OUR_PASS = 5 M_OUR_DIR_FREE_KICK = 6 M_OUR_GOAL = 7 M_OUR_OFFSIDE = 8 # play modes in their favor M_THEIR_KICKOFF = 9 M_THEIR_KICK_IN = 10 M_THEIR_CORNER_KICK = 11 M_THEIR_GOAL_KICK = 12 M_THEIR_FREE_KICK = 13 M_THEIR_PASS = 14 M_THEIR_DIR_FREE_KICK = 15 M_THEIR_GOAL = 16 M_THEIR_OFFSIDE = 17 # neutral play modes M_BEFORE_KICKOFF = 18 M_GAME_OVER = 19 M_PLAY_ON = 20 # play mode groups MG_OUR_KICK = 0 MG_THEIR_KICK = 1 MG_ACTIVE_BEAM = 2 MG_PASSIVE_BEAM = 3 MG_OTHER = 4 # play on, game over FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0)) FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8)) def __init__(self,robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool,
class World(): STEPTIME = 0.02 # Fixed step time STEPTIME_MS = 20 # Fixed step time in milliseconds VISUALSTEP = 0.04 # Fixed visual step time VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds # play modes in our favor M_OUR_KICKOFF = 0 M_OUR_KICK_IN = 1 M_OUR_CORNER_KICK = 2 M_OUR_GOAL_KICK = 3 M_OUR_FREE_KICK = 4 M_OUR_PASS = 5 M_OUR_DIR_FREE_KICK = 6 M_OUR_GOAL = 7 M_OUR_OFFSIDE = 8 # play modes in their favor M_THEIR_KICKOFF = 9 M_THEIR_KICK_IN = 10 M_THEIR_CORNER_KICK = 11 M_THEIR_GOAL_KICK = 12 M_THEIR_FREE_KICK = 13 M_THEIR_PASS = 14 M_THEIR_DIR_FREE_KICK = 15 M_THEIR_GOAL = 16 M_THEIR_OFFSIDE = 17 # neutral play modes M_BEFORE_KICKOFF = 18 M_GAME_OVER = 19 M_PLAY_ON = 20 # play mode groups MG_OUR_KICK = 0 MG_THEIR_KICK = 1 MG_ACTIVE_BEAM = 2 MG_PASSIVE_BEAM = 3 MG_OTHER = 4 # play on, game over FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0)) FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8)) def __init__(self,robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool,
enable_draw:bool, logger:Logger, host:str) -> None:
0
2023-12-16 23:40:23+00:00
24k
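Between visual updates, `update_imu` in the record above rotates the torso orientation with the gyro and dead-reckons position with `x0 + v0*t + 0.5*a*t^2`, decaying velocity for stability and freezing the position once 0.2 s pass without a visual fix. The sketch below folds that bookkeeping into one call (the original precomputes the next-step prediction one cycle ahead); `GRAVITY` and `IMU_DECAY` are placeholders, since only the names `Robot.GRAVITY` and `Robot.IMU_DECAY` appear in this excerpt:

```python
import numpy as np

STEPTIME = 0.02                          # fixed step time (s), as in the snippets above
GRAVITY = np.array([0.0, 0.0, -9.81])    # assumed value; the excerpt only names Robot.GRAVITY
IMU_DECAY = 0.996                        # placeholder; Robot.IMU_DECAY is not shown here

def imu_dead_reckon_step(R_torso_to_field, acc_proper, pos, vel, ms_since_visual):
    """One non-visual IMU step for the 'weak' torso position/velocity estimate.

    R_torso_to_field : 3x3 rotation matrix (torso -> field), kept updated from the gyro
    acc_proper       : accelerometer reading in the torso frame
    pos, vel         : current absolute torso position / velocity estimates
    ms_since_visual  : milliseconds since the last visual (localization) update
    """
    # proper acceleration -> coordinate acceleration in the field frame
    acc = R_torso_to_field @ acc_proper + GRAVITY

    if ms_since_visual < 200:
        # trust the kinematic prediction, decaying velocity as a stability tradeoff
        pos = pos + vel * STEPTIME + acc * (0.5 * STEPTIME ** 2)
        vel = (vel + acc * STEPTIME) * IMU_DECAY
        if pos[2] < 0.0:
            pos[2] = 0.0                 # keep the estimate above the ground plane
    else:
        # no visual fix for 0.2 s: lock the position, let the velocity decay to zero
        vel = vel * 0.97

    return pos, vel, acc
```

When localization is available, the snippet simply overwrites all of these estimates with the localized values, so the accumulated drift is bounded by the 0.2 s window.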
Sam-Izdat/tinycio
src/tinycio/tonemapping.py
[ { "identifier": "applyAgX", "path": "src/tinycio/np_agx/agx.py", "snippet": "def applyAgX(array):\n # type: (numpy.ndarray) -> numpy.ndarray\n \"\"\"\n -> take linear - sRGB image data as input\n - apply custom grading if any\n - apply the AgX Punchy view-transform\n - return a display-ready array encoded for sRGB SDR monitors\n\n Args:\n array: float32 array, R-G-B format, sRGB Display\n \"\"\"\n\n # Apply Grading\n array = customLook1(array)\n array = applyAgxLog(array)\n array = applyAgxLut(array) # AgX Base\n # Ready for display.\n return array" }, { "identifier": "applyAgXPunchy", "path": "src/tinycio/np_agx/agx.py", "snippet": "def applyAgXPunchy(array):\n # type: (numpy.ndarray) -> numpy.ndarray\n \"\"\"\n -> take linear - sRGB image data as input\n - apply custom grading if any\n - apply the AgX Punchy view-transform\n - return a display-ready array encoded for sRGB SDR monitors\n\n Args:\n array: float32 array, R-G-B format, sRGB Display\n \"\"\"\n\n # Apply Grading\n array = customLook1(array)\n array = applyAgxLog(array)\n array = applyAgxLut(array) # AgX Base\n array = applyLookPunchy(array=array)\n # Ready for display.\n return array" }, { "identifier": "ColorSpace", "path": "src/tinycio/colorspace.py", "snippet": "class ColorSpace:\n \"\"\"\n Color space conversion. Applies OETFs and EOTFs as needed but omits tonemapping. Cylindrical transformations are \n treated as distinct color spaces. Example:\n\n .. highlight:: python\n .. code-block:: python\n \n cs_in = ColorSpace.Variant.SRGB_LIN\n cs_out = ColorSpace.Variant.OKLAB\n oklab_image = ColorSpace.convert(srgb_image, source=cs_in, destination=cs_out)\n \"\"\"\n class Variant(IntEnum):\n \"\"\"\n Color space enum. For a list of available options, see :ref:`ref_color_spaces`.\n \"\"\"\n UNKNOWN = 1<<0 \n NONCOLOR = 1<<1 \n CIE_XYZ = 1<<2 \n CIE_XYY = 1<<3 \n SRGB = 1<<4 \n SRGB_LIN = 1<<5 \n REC709 = 1<<6 \n REC2020 = 1<<7 \n REC2020_LIN = 1<<8 \n DCI_P3 = 1<<9 \n DCI_P3_LIN = 1<<10 \n DISPLAY_P3 = 1<<11 \n ACESCG = 1<<12 \n ACESCC = 1<<13 \n ACESCCT = 1<<14 \n ACES2065_1 = 1<<15 \n LMS = 1<<16 \n OKLAB = 1<<17 \n CIELAB = 1<<18 \n CIELUV = 1<<19 \n HSV = 1<<20 \n HSL = 1<<21 \n OKHSV = 1<<22\n OKHSL = 1<<23\n\n SCENE_LINEAR = SRGB_LIN | REC2020_LIN | DCI_P3_LIN | ACESCG | ACES2065_1 | CIE_XYZ\n PERCEPTUAL = OKLAB | CIELAB | CIELUV | OKHSL | OKHSV\n CYLINDRICAL = HSL | HSV | OKHSL | OKHSV\n\n GAMUT_SRGB = SRGB | SRGB_LIN | REC709 | HSL | HSV\n GAMUT_AP0 = ACES2065_1\n GAMUT_AP1 = ACESCG | ACESCC | ACESCCT\n GAMUT_REC2020 = REC2020 | REC2020_LIN\n GAMUT_DCI_P3 = DCI_P3 | DCI_P3_LIN\n GAMUT_DISPLAY_P3= DISPLAY_P3\n GAMUT_OKLAB = OKLAB | OKHSL | OKHSV\n GAMUT_CIE_XYZ = CIE_XYZ | CIE_XYY\n GAMUT_CIELAB = CIELAB\n GAMUT_CIELUV = CIELUV\n GAMUT_OTHER = LMS | UNKNOWN | NONCOLOR\n\n WP_D65 = SRGB | SRGB_LIN | REC709 | DISPLAY_P3 | REC2020 | REC2020_LIN | CIE_XYZ | CIE_XYY\n WP_CCT_6300 = DCI_P3 | DCI_P3_LIN\n WP_CCT_6000 = ACESCG | ACESCC | ACESCCT | ACES2065_1\n\n MODEL_RGB = SRGB | SRGB_LIN | REC709 | REC2020 | REC2020_LIN | DCI_P3 | DCI_P3_LIN | DISPLAY_P3 | \\\n ACESCG | ACESCC | ACESCCT | ACES2065_1\n MODEL_CIE = CIE_XYZ | CIE_XYY | CIELAB | CIELUV\n MODEL_CAM = 0\n MODEL_YUV = 0\n MODEL_OTHER = LMS | HSL | HSV | OKLAB # is OKLAB CAM-based?\n \n NEGATIVE = OKLAB | CIELAB | CIELUV | GAMUT_AP0\n NON_NEGATIVE = ~NEGATIVE\n\n DISABLED = CIELUV\n UNSUPPORTED = OKHSV | OKHSL # disabled doesn't go here - CS must have alternate path\n SUPPORTED = ~UNSUPPORTED \n\n # FIXME: LUV doesn't quite match expected values, needs further 
testing\n\n mat_xyz_to_srgb = [\n [3.24096994190452134, -1.53738317757009346, -0.498610760293003284],\n [-0.969243636280879826, 1.87596750150772067, 0.0415550574071756125],\n [0.0556300796969936084, -0.203976958888976564, 1.05697151424287856]]\n\n mat_srgb_to_xyz = [\n [0.412390799265959481, 0.357584339383877964, 0.180480788401834288],\n [0.212639005871510358, 0.715168678767755927, 0.072192315360733715],\n [0.0193308187155918507, 0.119194779794625988, 0.950532152249660581]]\n\n mat_srgb_to_acescg = [\n [ 0.6130974024, 0.3395231462, 0.04737945141],\n [ 0.07019372247, 0.916353879, 0.01345239847],\n [ 0.02061559288, 0.1095697729, 0.8698146341]]\n\n # NOTE: Includes \"D60\"/D65 white point conversion\n mat_acescg_to_srgb = [\n [ 1.705050993, -0.6217921206,-0.083258872],\n [-0.1302564175, 1.140804737, -0.01054831907],\n [-0.02400335681,-0.1289689761, 1.152972333]]\n\n # NOTE: Includes \"D60\"/D65 white point conversion\n mat_srgb_to_aces2065_1 = [\n [ 0.439632982, 0.382988698, 0.17737832],\n [ 0.0897764431, 0.813439429, 0.0967841284],\n [ 0.0175411704, 0.111546553, 0.870912277]]\n\n mat_aces2065_1_to_srgb = [\n [ 2.52168619, -1.13413099, -0.387555198],\n [-0.276479914, 1.37271909, -0.0962391736],\n [-0.015378065, -0.152975336, 1.1683534]]\n\n mat_srgb_to_displayp3 = [\n [ 0.822461969, 0.177538031, 1.15772692e-10],\n [ 0.0331941989, 0.966805801, 1.95085037e-11],\n [ 0.0170826307, 0.0723974405, 0.910519929]]\n\n mat_displayp3_to_srgb = [\n [ 1.22494018, -0.224940176, -4.77534979e-11],\n [-0.0420569547, 1.04205695, 3.37864801e-11],\n [-0.0196375546,-0.0786360454, 1.0982736]] \n\n # NOTE: No chromatic adaptation\n mat_srgb_to_dcip3 = [\n [0.868579739716132409, 0.128919138460847047, 0.00250112182302054368],\n [0.0345404102543194426, 0.961811386361919975, 0.0036482033837605824],\n [0.0167714290414502718, 0.0710399977868858352, 0.912188573171663893]]\n\n # NOTE: No chromatic adaptation\n mat_dcip3_to_srgb = [\n [ 1.15751640619975871, -0.154962378073857756, -0.00255402812590095854],\n [-0.0415000715306859699, 1.04556792307969925, -0.00406785154901328463],\n [-0.0180500389562539583,-0.0785782726530290654, 1.09662831160928302]]\n\n # NOTE: No chromatic adaptation\n mat_dcip3_to_xyz = [\n [ 0.445169815564552417, 0.277134409206777664, 0.172282669815564564],\n [ 0.209491677912730539, 0.721595254161043636, 0.0689130679262258258],\n [-3.63410131696985616e-17, 0.0470605600539811521, 0.907355394361973415]]\n\n # NOTE: No chromatic adaptation\n mat_xyz_to_dcip3 = [\n [2.7253940304917328, -1.01800300622718496, -0.440163195190036463],\n [-0.795168025808764195, 1.689732054843624, 0.0226471906084774533],\n [0.0412418913957000325, -0.0876390192158623825, 1.10092937864632191]]\n\n mat_srgb_to_rec2020 = [\n [ 0.627403896, 0.329283039, 0.0433130657],\n [ 0.0690972894, 0.919540395, 0.0113623156],\n [ 0.0163914389, 0.0880133077, 0.895595253]]\n\n mat_rec2020_to_srgb = [\n [ 1.660491, -0.587641139,-0.0728498633],\n [-0.124550475, 1.1328999, -0.00834942258],\n [-0.0181507633,-0.100578898, 1.11872966]]\n\n mat_rec2020_to_xyz = [\n [0.636958048301291, 0.144616903586208, 0.168880975164172],\n [0.262700212011267, 0.677998071518871, 0.059301716469862],\n [4.99410657446607e-17, 0.0280726930490874, 1.06098505771079]]\n\n mat_xyz_to_rec2020 = [\n [1.71665118797127, -0.355670783776393, -0.25336628137366],\n [-0.666684351832489, 1.61648123663494, 0.0157685458139111],\n [0.0176398574453108, -0.0427706132578085, 0.942103121235474]]\n\n # NOTE: No chromatic adaptation\n mat_acescg_to_xyz = [\n [ 0.66245418, 0.13400421, 0.15618769],\n [ 
0.27222872, 0.67408177, 0.05368952],\n [-0.00557465, 0.00406073, 1.0103391 ]]\n\n # NOTE: No chromatic adaptation\n mat_xyz_to_acescg = [\n [ 1.64102338, -0.32480329, -0.2364247 ],\n [-0.66366286, 1.61533159, 0.01675635],\n [ 0.01172189, -0.00828444, 0.98839486]]\n\n # NOTE: For CIE XYZ color\n mat_d60_to_d65 = [\n [ 0.98722400,-0.00611327, 0.01595330],\n [-0.00759836, 1.00186000, 0.00533002],\n [ 0.00307257,-0.00509595, 1.08168000]]\n\n # NOTE: For CIE XYZ color\n mat_d65_to_d60 = [\n [ 1.01303000, 0.00610531,-0.01497100],\n [ 0.00769823, 0.99816500,-0.00503203],\n [-0.00284131, 0.00468516, 0.92450700]]\n\n # NOTE: For CIE XYZ color\n mat_d65_to_dci = [\n [0.976578896646979768, -0.0154362646984919742, -0.016686021704209866],\n [-0.0256896658505145926, 1.02853916787996963, -0.00378517365630504153],\n [-0.00570574587417104179, 0.0110778657389971485, 0.871176159390377409]]\n \n # NOTE: For CIE XYZ color\n mat_dci_to_d65 = [\n [1.02449672775257752, 0.0151635410224165156, 0.0196885223342066827],\n [0.0256121933371584198, 0.97258630562441342, 0.00471635229242730096],\n [0.0063842306500876874, -0.012268082736730219, 1.14794244517367791]]\n\n mat_xyz_to_lms = [\n [ 0.8951, 0.2664,-0.1614],\n [-0.7502, 1.7135, 0.0367],\n [ 0.0389,-0.0685, 1.0296]]\n\n mat_lms_to_xyz = [\n [ 0.986993, -0.147054, 0.159963],\n [ 0.432305, 0.51836, 0.0492912],\n [ -0.00852866, 0.0400428, 0.968487]]\n\n # OKLAB's XYZ to LMS\n mat_oklab_m1 = [\n [ 0.8189330101, 0.3618667424, -0.1288597137],\n [ 0.0329845436, 0.9293118715, 0.0361456387],\n [ 0.0482003018, 0.2643662691, 0.6338517070]]\n\n # OKLAB's non-linear L'M'S' to OKLAB\n mat_oklab_m2 = [\n [ 0.2104542553, 0.7936177850, -0.0040720468],\n [ 1.9779984951, -2.4285922050, 0.4505937099],\n [ 0.0259040371, 0.7827717662, -0.8086757660]]\n\n # Inverse of OKLAB M1\n mat_oklab_m1_inv = [\n [ 1.22701385, -0.55779998, 0.28125615],\n [-0.04058018, 1.11225687, -0.07167668],\n [-0.07638128, -0.42148198, 1.58616322]]\n\n # Inverse of OKLAB M2\n mat_oklab_m2_inv = [\n [ 1. , 0.39633779, 0.21580376],\n [ 1.00000001, -0.10556134, -0.06385417],\n [ 1.00000005, -0.08948418, -1.29148554]]\n\n @classmethod\n def convert(cls, im:Union[torch.Tensor, ColorImage], source:Variant, destination:Variant) -> torch.Tensor:\n \"\"\"\n Change the color space of an image. Cylindrical transformations HSV/HSL are \n treated as their own color spaces and assumed to be relative to sRGB linear. \n Unless otherwise noted or required by specification (e.g. ACES), we assume D65 white point.\n\n .. warning::\n\n Tone mapping is not included, so converting the color space of HDR values to \n an LDR-designated color space will not automatically reduce dynamic range. For example, \n taking an HDR image from :code:`ACESCG` (AP1) to :code:`SRGB` will yield the sRGB \n gamma curve, but values outside the required range must still be tone mapped or clamped beforehand.\n\n .. warning::\n\n Cylindrical transformations (HSL, HSV) should be given input in [0, 1] linear sRGB range \n (or equivalent). 
This is not strictly enforced but input outside this range may yield \n unpredictable results or *NaN* values.\n\n :param im: [C=3, H, W] image tensor \n :type im: torch.Tensor | ColorImage\n :param source: color space to convert from\n :param destination: color space to convert to\n :return: image tensor in designated color space\n \"\"\"\n ip, op = source, destination\n cs = cls.Variant\n tf = TransferFunction\n if ip == op: return im\n\n assert im.dim() == 3 and im.size(0) == 3, f\"expected [C=3, H, W] image tensor, got {im.size()}\"\n assert source != 0, f\"Unknown source color space\"\n assert ip & cs.SUPPORTED, f\"Source color space not supported: {source.name}\"\n assert op & cs.SUPPORTED, f\"Destination color space not supported: {destination.name}\"\n assert ip & ~cs.DISABLED, f\"Source color space disabled: {ColorSpace.Variant(ip).name}\"\n assert op & ~cs.DISABLED, f\"Destination color space disabled: {ColorSpace.Variant(op).name}\"\n\n err_not_implemented = f\"Color space conversion not implemented: {ColorSpace.Variant(ip).name} to {ColorSpace.Variant(op).name}\" \n\n # Direct path where it matters, loop-de-loop elsewhere\n if ip == cs.SRGB_LIN:\n if op == cs.SRGB: im = tf.srgb_oetf(im)\n elif op == cs.REC709: im = tf.rec709_oetf(im)\n elif op == cs.REC2020: im = tf.rec2020_oetf(mm(im, cls.mat_srgb_to_rec2020))\n elif op == cs.REC2020_LIN: im = mm(im, cls.mat_srgb_to_rec2020)\n elif op == cs.DCI_P3: im = tf.dcip3_oetf(mm(mm(mm(im, cls.mat_srgb_to_xyz), cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3))\n elif op == cs.DCI_P3_LIN: im = mm(mm(mm(im, cls.mat_srgb_to_xyz), cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3)\n elif op == cs.DISPLAY_P3: im = tf.srgb_oetf(mm(im, cls.mat_srgb_to_displayp3))\n elif op == cs.CIE_XYZ: im = mm(im, cls.mat_srgb_to_xyz)\n elif op == cs.CIE_XYY: im = cls._xyz_to_xyy(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.LMS: im = cls._xyz_to_lms(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.ACESCG: im = mm(im, cls.mat_srgb_to_acescg)\n elif op == cs.ACESCC: im = cls._acescg_to_acescc(mm(im, cls.mat_srgb_to_acescg))\n elif op == cs.ACES2065_1: im = mm(im, cls.mat_srgb_to_aces2065_1)\n elif op == cs.CIELAB: im = cls._xyz_to_cielab(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.CIELUV: im = cls._xyz_to_cieluv(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.OKLAB: im = cls._rgb_to_oklab(im)\n elif op == cs.HSL: im = cls._rgb_to_hsl(tf.srgb_oetf(im))\n elif op == cs.HSV: im = cls._rgb_to_hsv(tf.srgb_oetf(im))\n else: raise Exception(err_not_implemented)\n elif ip == cs.SRGB:\n if op == cs.HSL: im = cls._rgb_to_hsl(im)\n elif op == cs.HSV: im = cls._rgb_to_hsv(im)\n else: im = cls.convert(tf.srgb_eotf(im), cs.SRGB_LIN, op)\n elif ip == cs.REC709: im = cls.convert(tf.rec709_eotf(im), cs.SRGB_LIN, op)\n elif ip == cs.REC2020: \n if op == cs.REC2020_LIN: im = tf.rec2020_eotf(im)\n elif op == cs.CIE_XYZ: im = mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_xyz)\n elif op == cs.SRGB_LIN: im = mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_srgb)\n else: im = cls.convert(mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.REC2020_LIN: \n if op == cs.REC2020: im = tf.rec2020_oetf(im)\n elif op == cs.CIE_XYZ: im = mm(im, cls.mat_rec2020_to_xyz)\n elif op == cs.SRGB_LIN: im = mm(im, cls.mat_rec2020_to_srgb)\n else: im = cls.convert(mm(im, cls.mat_rec2020_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.DCI_P3: \n if op == cs.DCI_P3_LIN: im = tf.dcip3_eotf(im)\n elif op == cs.CIE_XYZ: im = mm(mm(tf.dcip3_eotf(im), cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65)\n else: im = 
cls.convert(mm(mm(tf.dcip3_eotf(im), cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65), cs.CIE_XYZ, op)\n elif ip == cs.DCI_P3_LIN: \n if op == cs.DCI_P3: im = tf.dcip3_oetf(im)\n elif op == cs.CIE_XYZ: im = mm(mm(im, cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65)\n else: im = cls.convert(mm(mm(im, cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65), cs.CIE_XYZ, op)\n elif ip == cs.DISPLAY_P3: im = cls.convert(mm(tf.srgb_eotf(im), cls.mat_displayp3_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.CIE_XYZ:\n if op == cs.CIE_XYY: im = cls._xyz_to_xyy(im)\n elif op == cs.REC2020_LIN: im = mm(im, cls.mat_xyz_to_rec2020)\n elif op == cs.REC2020: im = tf.rec2020_oetf(mm(im, cls.mat_xyz_to_rec2020))\n elif op == cs.DCI_P3_LIN: im = mm(mm(im, cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3)\n elif op == cs.DCI_P3: im = tf.dcip3_oetf(mm(mm(im, cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3))\n elif op == cs.LMS: im = cls._xyz_to_lms(im)\n elif op == cs.ACESCG: im = mm(cls._d65_to_d60(im), cls.mat_xyz_to_acescg)\n elif op == cs.CIELAB: im = cls._xyz_to_cielab(im)\n elif op == cs.CIELUV: im = cls._xyz_to_cieluv(im)\n elif op == cs.OKLAB: im = cls._xyz_to_oklab(im)\n else: im = cls.convert(mm(im, cls.mat_xyz_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.CIE_XYY: \n if op == cs.CIE_XYZ: im = cls._xyy_to_xyz(im)\n else: im = cls.convert(cls._xyy_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.LMS: \n if op == cs.CIE_XYZ: im = cls._lms_to_xyz(im)\n else: im = cls.convert(cls._lms_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.ACESCG:\n # if op == cs.CIE_XYZ: im = cls._d60_to_d65(mm(im, cls.mat_acescg_to_xyz)) # FIXME: fails unit test (?)\n if op == cs.ACESCC: im = cls._acescg_to_acescc(im)\n else: im = cls.convert(mm(im, cls.mat_acescg_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.ACESCC:\n if op == cs.ACESCG: im = cls._acescc_to_acescg(im)\n else: im = cls.convert(cls._acescc_to_acescg(im), cs.ACESCG, op)\n elif ip == cs.ACES2065_1: im = cls.convert(mm(im, cls.mat_aces2065_1_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.HSL:\n if op == cs.SRGB: im = cls._hsl_to_rgb(im)\n else: im = cls.convert(tf.srgb_eotf(cls._hsl_to_rgb(im)), cs.SRGB_LIN, op)\n elif ip == cs.HSV:\n if op == cs.SRGB: im = cls._hsv_to_rgb(im)\n else: im = cls.convert(tf.srgb_eotf(cls._hsv_to_rgb(im)), cs.SRGB_LIN, op)\n elif ip == cs.CIELAB: im = cls.convert(cls._cielab_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.CIELUV: im = cls.convert(cls._cieluv_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.OKLAB:\n if op == cs.CIE_XYZ: im = cls._oklab_to_xyz(im)\n else: im = cls.convert(cls._oklab_to_rgb(im), cs.SRGB_LIN, op)\n else: raise Exception(err_not_implemented)\n\n return im\n\n @classmethod\n def _xyz_to_xyy(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to CIE xyY color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: CIE xyY color space tensor\n \"\"\"\n X = xyz[0:1]\n Y = xyz[1:2]\n Z = xyz[2:3]\n x = X / (X + Y + Z)\n y = Y / (X + Y + Z)\n return torch.cat([x, y, Y], dim=0)\n\n @classmethod\n def _xyy_to_xyz(cls, xyy:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE xyY color space to CIE XYZ color space.\n\n :param xyy: Input CIE xyY color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n x = xyy[0:1]\n y = xyy[1:2]\n Y = xyy[2:3]\n X = (Y / y) * x\n Z = (Y / y) * (1. 
- x - y)\n return torch.cat([X, Y, Z], dim=0)\n\n @classmethod\n def _xyz_to_lms(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to LMS color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: LMS color space tensor\n \"\"\"\n return mm(xyz, cls.mat_xyz_to_lms)\n\n @classmethod\n def _lms_to_xyz(cls, lms:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert LMS color space to CIE XYZ color space.\n\n :param lms: Input LMS color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n return mm(lms, cls.mat_lms_to_xyz)\n\n @classmethod\n def _acescg_to_acescc(cls, cg:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert scene-linear ACEScg to log ACEScc.\n\n :param lms: Input ACEScg color space tensor\n :return: ACEScc color space tensor\n \"\"\"\n res = torch.where(cg < 0.00003051757, \n (torch.log2(0.00001525878 + cg * 0.5) + 9.72) / 17.52, \n (torch.log2(cg) + 9.72) / 17.52)\n return res\n\n @classmethod\n def _acescc_to_acescg(cls, cc:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert log ACEScc to scene-linear ACEScg.\n\n :param lms: Input ACEScc color space tensor\n :return: ACEScg color space tensor\n \"\"\"\n res = torch.where(cc < -0.3013698630, \n (torch.exp2(cc * 17.52 - 9.72) - 0.00001525878) * 2,\n torch.exp2(cc * 17.52 - 9.72))\n return res\n\n @classmethod\n def _xyz_to_oklab(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to OKLAB color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: OKLAB color space tensor\n \"\"\" \n lms = mm(xyz, cls.mat_oklab_m1)\n lms_p = torch.pow(torch.abs(lms), 0.3333333333) * torch.sign(lms).float()\n lab = mm(lms_p, cls.mat_oklab_m2)\n return lab\n\n @classmethod\n def _oklab_to_xyz(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert OKLAB color space to CIE XYZ color space.\n\n :param lab: Input OKLAB color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n lms_p = mm(lab, cls.mat_oklab_m2_inv)\n lms = torch.pow(lms_p, 3.)\n xyz = mm(lms, cls.mat_oklab_m1_inv)\n return xyz\n\n\n @classmethod\n def __pivot_xyz_to_lab(cls, val): \n return torch.where(val > 0.008856, torch.pow(val, 0.3333333333), ((val * 903.3) + 16.0) / 116.0)\n\n @classmethod\n def _xyz_to_cielab(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from CIE XYZ to CIELAB.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: CIELAB color space tensor\n \"\"\"\n # https://github.com/CairX/convert-colors-py/blob/master/convcolors/__init__.py\n # MIT License\n\n # Copyright (c) 2022 Thomas Cairns\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE. \n x = xyz[0:1] / 0.95047 \n y = xyz[1:2] / 1.00000 \n z = xyz[2:3] / 1.08883 \n\n x = cls.__pivot_xyz_to_lab(x)\n y = cls.__pivot_xyz_to_lab(y)\n z = cls.__pivot_xyz_to_lab(z)\n\n l = torch.maximum(torch.zeros_like(y).to(y.device), (116.0 * y) - 16.0)\n a = (x - y) * 500.0\n b = (y - z) * 200.0\n return torch.cat([l, a, b], dim=0)\n\n @classmethod\n def _cielab_to_xyz(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from CIELAB to CIE XYZ.\n \n .. note::\n\n Assumes D65 standard illuminant.\n\n :param lab: Input CIELAB color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n # https://github.com/CairX/convert-colors-py/blob/master/convcolors/__init__.py\n # MIT License\n\n # Copyright (c) 2022 Thomas Cairns\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n l = lab[0:1]\n a = lab[1:2]\n b = lab[2:3]\n\n # Reminder: The y values is calculated first as it can be reused\n # for the calculation of x and z.\n y = (l + 16.0) / 116.0\n x = y + (a / 500.0)\n z = y - (b / 200.0)\n\n x3 = x * x * x\n z3 = z * z * z\n y3 = y * y * y\n\n x = torch.where(x3 > 0.008856, x3, ((x * 116.0) - 16.0) / 903.3)\n y = torch.where(l > 7.9996248, y3, l / 903.3)\n z = torch.where(z3 > 0.008856, z3, ((z * 116.0) - 16.0) / 903.3)\n\n x = x * 0.95047 \n y = y * 1.00000 \n z = z * 1.08883\n\n return torch.cat([x, y, z], dim=0)\n\n def _xyz_to_cieluv(image:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Converts CIE XYZ to CIELUV. \n \n .. 
note::\n\n Assumes D65 standard illuminant.\n\n :param image: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are X, Y, Z\n :return: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are L, U, V\n \"\"\"\n # https://github.com/stefanLeong/S2CRNet/blob/main/scripts/utils/color.py\n # MIT License\n\n # Copyright (c) 2021 StefanLeong\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n if len(image.size()) == 3:\n small_L = (29. / 3) ** 3 * image[1]\n large_L = 116 * torch.pow(image[1], 1 / 3.) - 16\n L = torch.where(image[1] <= (6. / 29) ** 3, small_L, large_L)\n\n denom = (image[0] + 15 * image[1] + 3 * image[2])\n u_prime = torch.where(denom != 0., 4 * image[0] / denom, 0.)\n v_prime = torch.where(denom != 0., 9 * image[1] / denom, 0.)\n d = 0\n elif len(image.size()) == 4:\n small_L = (29. / 3) ** 3 * image[:, 1]\n large_L = 116 * torch.pow(image[:, 1], 1 / 3.) - 16\n L = torch.where(image[:, 1] <= (6. / 29) ** 3, small_L, large_L)\n\n denom = (image[:, 0] + 15 * image[:, 1] + 3 * image[:, 2])\n u_prime = torch.where(denom > 0., 4 * image[:, 0] / denom, 0.)\n v_prime = torch.where(denom > 0., 9 * image[:, 1] / denom, 0.)\n d = 1\n\n u = 13 * L * (u_prime - .2009)\n v = 13 * L * (v_prime - .4610)\n\n luv_image = torch.stack((L, u, v), dim=d)\n\n return luv_image\n\n def _cieluv_to_xyz(image:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Converts CIELUV to CIE XYZ. \n \n .. 
note::\n\n Assumes D65 standard illuminant.\n\n :param image: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are L, U, V\n :return: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are X, Y, Z\n \"\"\"\n # https://github.com/stefanLeong/S2CRNet/blob/main/scripts/utils/color.py\n # MIT License\n\n # Copyright (c) 2021 StefanLeong\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n if len(image.size()) == 3:\n denom = (13 * image[0])\n u_prime = torch.where(denom != 0., image[1] / denom, 0.) + .2009\n v_prime = torch.where(denom != 0., image[2] / denom, 0.) + .4610\n\n small_Y = image[0] * (3. / 29) ** 3\n large_Y = ((image[0] + 16.) / 116.) ** 3\n\n Y = torch.where(image[0] <= 8, small_Y, large_Y)\n d = 0\n # batch of images\n elif len(image.size()) == 4:\n denom = (13 * image[:, 0])\n u_prime = torch.where(denom != 0., image[:, 1] / denom, 0.) + .2009\n v_prime = torch.where(denom != 0., image[:, 2] / denom, 0.) + .4610\n\n small_Y = image[:, 0] * (3. / 29) ** 3\n large_Y = ((image[:, 0] + 16.) / 116.) 
** 3\n\n Y = torch.where(image[:, 0] <= 8, small_Y, large_Y)\n d = 1\n\n X = torch.where(v_prime != 0., Y * 9 * u_prime / (4 * v_prime), 0.)\n Z = torch.where(v_prime != 0., Y * (12 - 3 * u_prime - 20 * v_prime) / (4 * v_prime), 0.)\n\n xyz_image = torch.stack((X, Y, Z), dim=d)\n\n return xyz_image\n\n @classmethod\n def _rgb_to_oklab(cls, rgb:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from linear sRGB to OKLAB.\n\n :param rgb: Input linear sRGB color space tensor\n :return: OKLAB color space tensor\n \"\"\"\n cr = rgb[0:1]\n cg = rgb[1:2]\n cb = rgb[2:3]\n\n l = 0.4122214708 * cr + 0.5363325363 * cg + 0.0514459929 * cb;\n m = 0.2119034982 * cr + 0.6806995451 * cg + 0.1073969566 * cb;\n s = 0.0883024619 * cr + 0.2817188376 * cg + 0.6299787005 * cb;\n\n l_ = torch.pow(torch.abs(l), 0.3333333333) * torch.sign(l).float()\n m_ = torch.pow(torch.abs(m), 0.3333333333) * torch.sign(m).float()\n s_ = torch.pow(torch.abs(s), 0.3333333333) * torch.sign(s).float()\n\n return torch.cat([\n 0.2104542553 * l_ + 0.7936177850 * m_ - 0.0040720468 * s_,\n 1.9779984951 * l_ - 2.4285922050 * m_ + 0.4505937099 * s_,\n 0.0259040371 * l_ + 0.7827717662 * m_ - 0.8086757660 * s_], dim=0)\n\n @classmethod\n def _oklab_to_rgb(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from OKLAB to linear sRGB.\n\n :param lab: Input OKLAB color space tensor\n :return: Linear sRGB color space tensor\n \"\"\"\n cl = lab[0:1]\n ca = lab[1:2]\n cb = lab[2:3]\n\n l_ = cl + 0.3963377774 * ca + 0.2158037573 * cb\n m_ = cl - 0.1055613458 * ca - 0.0638541728 * cb\n s_ = cl - 0.0894841775 * ca - 1.2914855480 * cb\n\n l = l_*l_*l_\n m = m_*m_*m_\n s = s_*s_*s_\n\n return torch.cat([\n +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s,\n -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s,\n -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s], dim=0)\n\n @classmethod\n def _rgb_to_hsl(cls, rgb: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB image tensor to sRGB-relative HSL. \n \n .. note::\n\n expects non-linear sRGB w/ gamma curve as input\n\n :param rgb: Input sRGB image tensor\n :return: HSL image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n rgb = rgb.unsqueeze(0)\n cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)\n cmin = torch.min(rgb, dim=1, keepdim=True)[0]\n delta = cmax - cmin\n hsl_h = torch.empty_like(rgb[:, 0:1, :, :])\n cmax_idx[delta == 0] = 3\n hsl_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]\n hsl_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]\n hsl_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]\n hsl_h[cmax_idx == 3] = 0.\n hsl_h /= 6.\n\n hsl_l = (cmax + cmin) / 2.\n hsl_s = torch.empty_like(hsl_h)\n hsl_s[hsl_l == 0] = 0\n hsl_s[hsl_l == 1] = 0\n hsl_l_ma = torch.bitwise_and(hsl_l > 0, hsl_l < 1)\n hsl_l_s0_5 = torch.bitwise_and(hsl_l_ma, hsl_l <= 0.5)\n hsl_l_l0_5 = torch.bitwise_and(hsl_l_ma, hsl_l > 0.5)\n hsl_s[hsl_l_s0_5] = ((cmax - cmin) / (hsl_l * 2.))[hsl_l_s0_5]\n hsl_s[hsl_l_l0_5] = ((cmax - cmin) / (- hsl_l * 2. + 2.))[hsl_l_l0_5]\n return torch.cat([hsl_h, hsl_s, hsl_l], dim=1).squeeze(0)\n\n @classmethod\n def _hsl_to_rgb(cls, hsl: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB-relative HSL image tensor to sRGB. \n \n .. note::\n\n returns non-linear sRGB w/ gamma curve as output\n\n :param hsl: Input HSL image tensor\n :return: sRGB image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n hsl = hsl.unsqueeze(0)\n hsl_h, hsl_s, hsl_l = hsl[:, 0:1], hsl[:, 1:2], hsl[:, 2:3]\n _c = (-torch.abs(hsl_l * 2. - 1.) + 1) * hsl_s\n _x = _c * (-torch.abs(hsl_h * 6. % 2. 
- 1) + 1.)\n _m = hsl_l - _c / 2.\n idx = (hsl_h * 6.).type(torch.uint8)\n idx = (idx % 6).expand(-1, 3, -1, -1)\n rgb = torch.empty_like(hsl).to(hsl.device)\n _o = torch.zeros_like(_c).to(hsl.device)\n rgb[idx == 0] = torch.cat([_c, _x, _o], dim=1)[idx == 0]\n rgb[idx == 1] = torch.cat([_x, _c, _o], dim=1)[idx == 1]\n rgb[idx == 2] = torch.cat([_o, _c, _x], dim=1)[idx == 2]\n rgb[idx == 3] = torch.cat([_o, _x, _c], dim=1)[idx == 3]\n rgb[idx == 4] = torch.cat([_x, _o, _c], dim=1)[idx == 4]\n rgb[idx == 5] = torch.cat([_c, _o, _x], dim=1)[idx == 5]\n rgb += _m\n return rgb.squeeze(0)\n\n @classmethod\n def _rgb_to_hsv(cls, rgb: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB image tensor to sRGB-relative HSV. \n \n .. note::\n\n expects non-linear sRGB w/ gamma curve as input\n\n .. warning::\n\n input tensor will be clamped to [0, 1] range\n\n :param rgb: Input sRGB image tensor\n :return: HSV image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n rgb = rgb.clamp(0.,1.).unsqueeze(0)\n cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)\n cmin = torch.min(rgb, dim=1, keepdim=True)[0]\n delta = cmax - cmin\n hsv_h = torch.empty_like(rgb[:, 0:1, :, :])\n cmax_idx[delta == 0] = 3\n hsv_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]\n hsv_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]\n hsv_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]\n hsv_h[cmax_idx == 3] = 0.\n hsv_h /= 6.\n hsv_s = torch.where(cmax == 0, torch.tensor(0.).type_as(rgb), delta / cmax)\n hsv_v = cmax\n return torch.cat([hsv_h, hsv_s, hsv_v], dim=1).squeeze(0)\n\n @classmethod\n def _hsv_to_rgb(cls, hsv: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB-relative HSV image tensor to sRGB. \n \n .. 
note::\n \n returns non-linear sRGB w/ gamma curve as output\n\n :param hsv: Input HSV image tensor\n :return: sRGB image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n hsv = hsv.unsqueeze(0)\n hsv_h, hsv_s, hsv_l = hsv[:, 0:1], hsv[:, 1:2], hsv[:, 2:3]\n _c = hsv_l * hsv_s\n _x = _c * (- torch.abs(hsv_h * 6. % 2. - 1) + 1.)\n _m = hsv_l - _c\n _o = torch.zeros_like(_c).to(hsv.device)\n idx = (hsv_h * 6.).type(torch.uint8)\n idx = (idx % 6).expand(-1, 3, -1, -1)\n rgb = torch.empty_like(hsv).to(hsv.device)\n rgb[idx == 0] = torch.cat([_c, _x, _o], dim=1)[idx == 0]\n rgb[idx == 1] = torch.cat([_x, _c, _o], dim=1)[idx == 1]\n rgb[idx == 2] = torch.cat([_o, _c, _x], dim=1)[idx == 2]\n rgb[idx == 3] = torch.cat([_o, _x, _c], dim=1)[idx == 3]\n rgb[idx == 4] = torch.cat([_x, _o, _c], dim=1)[idx == 4]\n rgb[idx == 5] = torch.cat([_c, _o, _x], dim=1)[idx == 5]\n rgb += _m\n return rgb.squeeze(0)\n\n @classmethod\n def _d60_to_d65(cls, im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ image from \"D60\" to D65 white point.\n\n :param im: Input image tensor\n :return: Converted image tensor\n \"\"\"\n # There is not really a CIE D60 white point, but that's what everyone calls what ACES uses.\n return mm(im, cls.mat_d60_to_d65)\n\n @classmethod\n def _d65_to_d60(cls, im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ image from D65 to \"D60\" white point.\n\n :param torch.Tensor im: Input image tensor\n :return: Converted image tensor\n \"\"\"\n return mm(im, cls.mat_d65_to_d60)" }, { "identifier": "TransferFunction", "path": "src/tinycio/colorspace.py", "snippet": "class TransferFunction: \n \"\"\"\n Opto-electronic/electro-optical transfer functions. Example:\n\n .. highlight:: python\n .. code-block:: python\n \n im_srgb = TransferFunction.srgb_oetf(im_linear)\n\n .. note::\n These transfer functions are applied automatically by :code:`ColorSpace.convert` when appropriate, \n but can instead be used explicitly.\n\n \"\"\"\n @staticmethod\n def srgb_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n sRGB electro-optical transfer function (sRGB gamma to linear sRGB)\n\n :param im: sRGB image tensor \n :return: linear sRGB image tensor\n \"\"\"\n s1 = im / 12.92321\n s2 = torch.pow((im + 0.055) / 1.055, 12. 
/ 5)\n return torch.where(im <= 0.04045, s1, s2)\n\n @staticmethod\n def srgb_oetf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n sRGB opto-electronic transfer function (linear sRGB to sRGB gamma)\n\n :param im: linear sRGB image tensor \n :return: sRGB image tensor\n \"\"\"\n s1 = im * 12.92321\n s2 = torch.pow(im, 1. / 2.4) * 1.055 - 0.055\n return torch.where(im <= 0.0031308, s1, s2)\n\n @staticmethod\n def rec709_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Rec. 709 electro-optical transfer function (Rec. 709 gamma to linear sRGB)\n\n :param im: Rec. 709 image tensor \n :return: linear sRGB image tensor (same primaries)\n \"\"\"\n s1 = im / 4.5\n s2 = torch.pow((im + 0.099) / 1.099, 2.2)\n return torch.where(im <= 0.081, s1, s2)\n\n @staticmethod\n def rec709_oetf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Rec. 709 opto-electronic transfer function (linear sRGB to Rec. 709 gamma)\n\n :param im: linear sRGB image tensor (same primaries)\n :return: Rec. 709 image tensor\n \"\"\"\n s1 = im * 4.5\n s2 = torch.pow(im, .4545) * 1.099 - 0.099\n return torch.where(im <= 0.018, s1, s2)\n\n @staticmethod\n def rec2020_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Rec. 2020 electro-optical transfer function (Rec. 2020 gamma to linear)\n\n :param im: Rec. 2020 image tensor \n :return: linear Rec. 2020 gamut image tensor\n \"\"\"\n a = 1.09929682680944\n b = 0.08124285829 \n s1 = im / 4.5\n s2 = torch.pow((im + a - 1.) / a, 1./ 0.45)\n return torch.where(im <= b, s1, s2)\n\n @staticmethod\n def rec2020_oetf(im:torch.Tensor) -> torch.Tensor: \n \"\"\"\n Rec. 2020 opto-electronic transfer function (linear to Rec. 2020 gamma)\n\n :param im: linear Rec. 2020 gamut image tensor \n :return: Rec. 2020 image tensor\n \"\"\"\n a = 1.09929682680944\n b = 0.018053968510807\n s1 = im * 4.5\n s2 = a * torch.pow(im, .45) - (a - 1.)\n return torch.where(im <= b, s1, s2)\n\n @staticmethod\n def dcip3_eotf(im:torch.Tensor) -> torch.Tensor: \n \"\"\"\n DCI P3 electro-optical transfer function (DCI P3 gamma to linear)\n\n :param im: DCI P3 image tensor \n :return: linear P3 gamut image tensor\n \"\"\"\n return torch.pow(im, 2.6)\n\n @staticmethod\n def dcip3_oetf(im:torch.Tensor) -> torch.Tensor: \n \"\"\"\n DCI P3 opto-electronic transfer function (linear to DCI P3 gamma)\n\n :param im: linear P3 gamut image tensor \n :return: DCI P3 image tensor\n \"\"\"\n return torch.pow(im, 1./2.6)\n\n @staticmethod\n def log_c_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n LogC electro-optical transfer function\n\n :param im: LogC encoded image tensor\n :return: linear image tensor \n \"\"\"\n offset = 0.00937677\n x = im.clone()\n x = torch.where(x > 0.1496582, \n torch.pow(10.0, (x - 0.385537) / 0.2471896) * 0.18 - offset,\n (x / 0.9661776 - 0.04378604) * 0.18 - offset)\n return x\n\n @staticmethod\n def log_c_oetf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n LogC opto-electronic transfer function\n\n :param im: linear image tensor \n :return: LogC encoded image tensor\n \"\"\"\n offset = 0.00937677\n x = im.clone()\n x = torch.where(x > 0.02 - offset,\n (((torch.log10((x + offset) / 0.18)) * 0.2471896) + 0.385537),\n ((((x + offset) / 0.18) + 0.04378604) * 0.9661776))\n return x\n\n @staticmethod\n def s_log_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n S-Log electro-optical transfer function\n\n :param im: S-Log encoded image tensor\n :return: linear image tensor \n \"\"\"\n x = im.clone()\n return torch.pow(10.0, ((x - 0.616596 - 0.03) / 0.432699)) - 0.037584\n\n @staticmethod\n def 
s_log_oetf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n S-Log opto-electronic transfer function\n\n :param im: linear image tensor \n :return: S-Log encoded image tensor\n \"\"\"\n x = im.clone()\n return (0.432699 * torch.log10(x + 0.037584) + 0.616596) + 0.03" } ]
import torch
import numpy as np
import typing
from enum import IntEnum
from .np_agx.agx import applyAgX, applyAgXPunchy
from .colorspace import ColorSpace, TransferFunction
17,575
class ToneMapping: """ Map high-dynamic-range values to low-dynamic-range. LDR is typically sRGB in [0, 1] range. Example: .. highlight:: python .. code-block:: python tm = ToneMapping.Variant.HABLE tonemapped_image = ToneMapping.apply(input_im, tone_mapper=tm) """ class Variant(IntEnum): """ Tone mapper enum. Available options are: .. highlight:: text .. code-block:: text - NONE - CLAMP - AGX - AGX_PUNCHY - HABLE - REINHARD - ACESCG """ NONE = 1<<0 CLAMP = 1<<1 AGX = 1<<2 AGX_PUNCHY = 1<<3 HABLE = 1<<4 REINHARD = 1<<5 ACESCG = 1<<6 IP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD IP_ACESCG = ACESCG OP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD OP_ACESCG = ACESCG DISABLED = 0 @classmethod def apply(cls, im:torch.Tensor, tone_mapper:Variant): """ Apply tone mapping to HDR image tensor. Input data is expected to be in the correct color space for the chosen tone mapper. .. note:: :code:`ACESCG` tone mapping is performed on AP1 primaries and expects input in the :code:`ACESCG` color space. All other tone mappers expect :code:`SRGB_LIN`. The :code:`tone_map()` method of :class:`ColorImage` handles this conversion automatically. :param torch.Tensor im: [C=3, H, W] sized image tensor :param ToneMapping.Variant tone_mapper: tonemapper to be used :return: image tensor :rtype: torch.Tensor """ assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}" op, tm = tone_mapper, cls.Variant err_not_supported, err_disabled = f"ToneMapping {op.name} is not supported", f"ToneMapping {op.name} is disabled" if op & tm.DISABLED: raise Exception(err_disabled) if op == tm.NONE: return im elif op == tm.CLAMP: return im.clamp(0., 1.) elif op == tm.AGX: return cls._agx(im) elif op == tm.AGX_PUNCHY: return cls._agx_punchy(im) elif op == tm.HABLE: return cls._hable(im) elif op == tm.REINHARD: return cls._reinhard_extended_luminance(im) elif op == tm.ACESCG: return cls._aces_fitted(im) else: raise Exception(err_not_supported) return out @classmethod def _agx(cls, im:torch.Tensor): device = im.device
class ToneMapping: """ Map high-dynamic-range values to low-dynamic-range. LDR is typically sRGB in [0, 1] range. Example: .. highlight:: python .. code-block:: python tm = ToneMapping.Variant.HABLE tonemapped_image = ToneMapping.apply(input_im, tone_mapper=tm) """ class Variant(IntEnum): """ Tone mapper enum. Available options are: .. highlight:: text .. code-block:: text - NONE - CLAMP - AGX - AGX_PUNCHY - HABLE - REINHARD - ACESCG """ NONE = 1<<0 CLAMP = 1<<1 AGX = 1<<2 AGX_PUNCHY = 1<<3 HABLE = 1<<4 REINHARD = 1<<5 ACESCG = 1<<6 IP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD IP_ACESCG = ACESCG OP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD OP_ACESCG = ACESCG DISABLED = 0 @classmethod def apply(cls, im:torch.Tensor, tone_mapper:Variant): """ Apply tone mapping to HDR image tensor. Input data is expected to be in the correct color space for the chosen tone mapper. .. note:: :code:`ACESCG` tone mapping is performed on AP1 primaries and expects input in the :code:`ACESCG` color space. All other tone mappers expect :code:`SRGB_LIN`. The :code:`tone_map()` method of :class:`ColorImage` handles this conversion automatically. :param torch.Tensor im: [C=3, H, W] sized image tensor :param ToneMapping.Variant tone_mapper: tonemapper to be used :return: image tensor :rtype: torch.Tensor """ assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}" op, tm = tone_mapper, cls.Variant err_not_supported, err_disabled = f"ToneMapping {op.name} is not supported", f"ToneMapping {op.name} is disabled" if op & tm.DISABLED: raise Exception(err_disabled) if op == tm.NONE: return im elif op == tm.CLAMP: return im.clamp(0., 1.) elif op == tm.AGX: return cls._agx(im) elif op == tm.AGX_PUNCHY: return cls._agx_punchy(im) elif op == tm.HABLE: return cls._hable(im) elif op == tm.REINHARD: return cls._reinhard_extended_luminance(im) elif op == tm.ACESCG: return cls._aces_fitted(im) else: raise Exception(err_not_supported) return out @classmethod def _agx(cls, im:torch.Tensor): device = im.device
out = applyAgX(im.permute(1, 2, 0).cpu().numpy())
0
2023-12-15 15:39:08+00:00
24k
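The TransferFunction snippet quoted in the row above documents a matched sRGB OETF/EOTF pair. As a minimal usage sketch (assuming the quoted tinycio module from src/tinycio/colorspace.py is importable as tinycio.colorspace; names and shapes here are illustrative, not part of the row), a linear tensor can be pushed through the sRGB curve and back:

# Minimal sketch of the OETF/EOTF round trip documented in the snippet above.
# Assumes the quoted colorspace module is importable as `tinycio.colorspace`.
import torch
from tinycio.colorspace import TransferFunction

im_linear = torch.rand(3, 64, 64)                 # linear-light sRGB tensor in [0, 1]
im_gamma = TransferFunction.srgb_oetf(im_linear)  # linear -> sRGB gamma encoding
im_back = TransferFunction.srgb_eotf(im_gamma)    # sRGB gamma -> linear again
print((im_linear - im_back).abs().max())          # expected: on the order of float precision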
quocanh34/magic-animate-modified
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetProcessor", "path": "magicanimate/models/multicontrolnet.py", "snippet": "class ControlNetProcessor(object):\n def __init__(\n self,\n controlnet: ControlNetModel,\n # image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],\n # controlnet_cond = torch.FloatTensor, #fix\n # conditioning_scale: float = 1.0,\n ):\n self.controlnet = controlnet\n # self.image = image\n # self.controlnet_cond = controlnet_cond #fix\n # self.conditioning_scale = conditioning_scale\n\n # def _default_height_width(self, height, width, image):\n # if isinstance(image, list):\n # image = image[0]\n\n # if height is None:\n # if isinstance(image, PIL.Image.Image):\n # height = image.height\n # elif isinstance(image, torch.Tensor):\n # height = image.shape[3]\n\n # height = (height // 8) * 8 # round down to nearest multiple of 8\n\n # if width is None:\n # if isinstance(image, PIL.Image.Image):\n # width = image.width\n # elif isinstance(image, torch.Tensor):\n # width = image.shape[2]\n\n # width = (width // 8) * 8 # round down to nearest multiple of 8\n\n # return height, width\n\n # def default_height_width(self, height, width):\n # return self._default_height_width(height, width, self.image)\n\n # def _prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype):\n # if not isinstance(image, torch.Tensor):\n # if isinstance(image, PIL.Image.Image):\n # image = [image]\n\n # if isinstance(image[0], PIL.Image.Image):\n # image = [\n # np.array(i.resize((width, height), resample=PIL_INTERPOLATION[\"lanczos\"]))[None, :] for i in image\n # ]\n # image = np.concatenate(image, axis=0)\n # image = np.array(image).astype(np.float32) / 255.0\n # image = image.transpose(0, 3, 1, 2)\n # image = torch.from_numpy(image)\n # elif isinstance(image[0], torch.Tensor):\n # image = torch.cat(image, dim=0)\n\n # image_batch_size = image.shape[0]\n\n # if image_batch_size == 1:\n # repeat_by = batch_size\n # else:\n # # image batch size is the same as prompt batch size\n # repeat_by = num_images_per_prompt\n\n # image = image.repeat_interleave(repeat_by, dim=0)\n\n # image = image.to(device=device, dtype=dtype)\n\n # return image\n\n # def _check_inputs(self, image, prompt, prompt_embeds):\n # image_is_pil = isinstance(image, PIL.Image.Image)\n # image_is_tensor = isinstance(image, torch.Tensor)\n # image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)\n # image_is_tensor_list = 
isinstance(image, list) and isinstance(image[0], torch.Tensor)\n\n # if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:\n # raise TypeError(\n # \"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors\"\n # )\n\n # if image_is_pil:\n # image_batch_size = 1\n # elif image_is_tensor:\n # image_batch_size = image.shape[0]\n # elif image_is_pil_list:\n # image_batch_size = len(image)\n # elif image_is_tensor_list:\n # image_batch_size = len(image)\n\n # if prompt is not None and isinstance(prompt, str):\n # prompt_batch_size = 1\n # elif prompt is not None and isinstance(prompt, list):\n # prompt_batch_size = len(prompt)\n # elif prompt_embeds is not None:\n # prompt_batch_size = prompt_embeds.shape[0]\n\n # if image_batch_size != 1 and image_batch_size != prompt_batch_size:\n # raise ValueError(\n # f\"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}\"\n # )\n\n # def check_inputs(self, prompt, prompt_embeds):\n # self._check_inputs(self.image, prompt, prompt_embeds)\n\n # def prepare_image(self, width, height, batch_size, num_images_per_prompt, device, do_classifier_free_guidance):\n # self.image = self._prepare_image(\n # self.image, width, height, batch_size, num_images_per_prompt, device, self.controlnet.dtype\n # )\n # if do_classifier_free_guidance:\n # self.image = torch.cat([self.image] * 2)\n\n def __call__(\n self,\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond, #fix\n conditioning_scale,\n return_dict,\n ) -> Tuple:\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond,\n conditioning_scale, \n return_dict=False,\n )\n down_block_res_samples = [\n down_block_res_sample * conditioning_scale for down_block_res_sample in down_block_res_samples\n ]\n mid_block_res_sample *= conditioning_scale\n return (down_block_res_samples, mid_block_res_sample)" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from magicanimate.models.unet_controlnet import UNet3DConditionModel
from magicanimate.models.multicontrolnet import ControlNetProcessor #fix
from magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from magicanimate.pipelines.context import (
    get_context_scheduler,
    get_total_steps
)
from magicanimate.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
15,878
for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], processors: List[ControlNetProcessor], #fix video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition1: list = None, controlnet_condition2: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ # controlnet = self.controlnet # processors = self.processors # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, # controlnet: ControlNetModel, # processors: List[ControlNetProcessor], scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, # controlnet1=processors[0], scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition1, condition2, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # Prepare first condition condition1 = torch.from_numpy(condition1.copy()).to(device=device, dtype=dtype) / 255.0 condition1 = torch.stack([condition1 for _ in range(num_videos_per_prompt)], dim=0) condition1 = rearrange(condition1, 'b f h w c -> (b f) c h w').clone() # Prepare second condition condition2 = torch.from_numpy(condition2.copy()).to(device=device, dtype=dtype) / 255.0 condition2 = torch.stack([condition2 for _ in range(num_videos_per_prompt)], dim=0) condition2 = rearrange(condition2, 'b f h w c -> (b f) c h w').clone() # Here, we're averaging the two conditions combined_condition = (condition1*8+condition2*2)/10 if do_classifier_free_guidance: combined_condition = torch.cat([combined_condition] * 2) #combined_condition = torch.from_numpy(combined_condition.copy()).to(device=device, dtype=dtype) return combined_condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep 
timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = 
get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], processors: List[ControlNetProcessor], #fix video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition1: list = None, controlnet_condition2: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ # controlnet = self.controlnet # processors = self.processors # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
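The all_code cell above includes an interpolate_latents() helper that inserts interpolation_factor - 1 blended frames between every pair of consecutive latent frames via a configurable get_tensor_interpolation_method(). Below is a minimal, self-contained sketch of the same idea, assuming plain linear interpolation (torch.lerp) as the blend function; the record does not show which interpolation method the pipeline actually configures.

import torch

def lerp_frames(latents: torch.Tensor, factor: int) -> torch.Tensor:
    # latents: (b, c, f, h, w) -> (b, c, (f - 1) * factor + 1, h, w)
    if factor < 2:
        return latents
    b, c, f, h, w = latents.shape
    out = torch.zeros(b, c, (f - 1) * factor + 1, h, w, dtype=latents.dtype)
    idx = 0
    for i in range(f - 1):
        v0, v1 = latents[:, :, i], latents[:, :, i + 1]
        out[:, :, idx] = v0
        idx += 1
        for k in range(1, factor):  # fractional positions 1/factor ... (factor-1)/factor
            out[:, :, idx] = torch.lerp(v0, v1, k / factor)
            idx += 1
    out[:, :, idx] = latents[:, :, -1]  # keep the final original frame
    return out

print(lerp_frames(torch.randn(1, 4, 3, 8, 8), factor=2).shape)  # torch.Size([1, 4, 5, 8, 8])

For factor = 2 this grows f frames into 2f - 1 while keeping every original frame in place, which matches the (f - 1) * interpolation_factor + 1 output length the pipeline allocates.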
reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size)
2
2023-12-15 01:22:37+00:00
24k
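The next_step()/invert() routines in the record above perform deterministic DDIM inversion: predict x0 from the current noisy latent, then step to the next (higher) noise level. Here is a small numeric sketch of that same update; the alpha values are hard-coded for illustration, whereas in the pipeline they come from scheduler.alphas_cumprod.

import torch

def ddim_inversion_step(x: torch.Tensor, eps: torch.Tensor,
                        alpha_t: float, alpha_next: float) -> torch.Tensor:
    # Same algebra as next_step(): recover the x0 estimate, then move it to the
    # next noise level in the inversion direction.
    pred_x0 = (x - (1 - alpha_t) ** 0.5 * eps) / alpha_t ** 0.5
    pred_dir = (1 - alpha_next) ** 0.5 * eps
    return alpha_next ** 0.5 * pred_x0 + pred_dir

x = torch.randn(1, 4, 64, 64)   # toy latent
eps = torch.randn_like(x)       # stand-in for the U-Net noise prediction
x_next = ddim_inversion_step(x, eps, alpha_t=0.95, alpha_next=0.90)
print(x_next.shape)             # torch.Size([1, 4, 64, 64])

Because no noise is injected, repeatedly applying this step yields a latent that the forward DDIM sampler can map back to (approximately) the original image, which is what invert() relies on.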
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n _container: typing.MutableMapping[str, list[str]]\n\n def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):\n super().__init__()\n self._container = {} # 'dict' is insert-ordered\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key: str, val: str) -> None:\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n self._container[key.lower()] = [key, val]\n\n def __getitem__(self, key: str) -> str:\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key: str) -> None:\n del self._container[key.lower()]\n\n def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n return key.lower() in self._container\n return False\n\n def setdefault(self, key: str, default: str = \"\") -> str:\n return super().setdefault(key, default)\n\n def __eq__(self, other: object) -> bool:\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return False\n else:\n other_as_http_header_dict = type(self)(maybe_constructable)\n\n return {k.lower(): v for k, v in self.itermerged()} == {\n k.lower(): v for k, v in other_as_http_header_dict.itermerged()\n }\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __len__(self) -> int:\n return len(self._container)\n\n def __iter__(self) -> typing.Iterator[str]:\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def discard(self, key: str) -> None:\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key: str, val: str, *, combine: bool = False) -> None:\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n If this is called with combine=True, instead of adding a new header value\n as a distinct item during iteration, this will instead append the value to\n any existing header value with a comma. 
If no existing header value exists\n for the key, then the value will simply be added, ignoring the combine parameter.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n >>> list(headers.items())\n [('foo', 'bar'), ('foo', 'baz')]\n >>> headers.add('foo', 'quz', combine=True)\n >>> list(headers.items())\n [('foo', 'bar, baz, quz')]\n \"\"\"\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n # if there are values here, then there is at least the initial\n # key/value pair\n assert len(vals) >= 2\n if combine:\n vals[-1] = vals[-1] + \", \" + val\n else:\n vals.append(val)\n\n def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n f\"extend() takes at most 1 positional arguments ({len(args)} given)\"\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, typing.Mapping):\n for key, val in other.items():\n self.add(key, val)\n elif isinstance(other, typing.Iterable):\n other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)\n for key, value in other:\n self.add(key, value)\n elif hasattr(other, \"keys\") and hasattr(other, \"__getitem__\"):\n # THIS IS NOT A TYPESAFE BRANCH\n # In this branch, the object has a `keys` attr but is not a Mapping or any of\n # the other types indicated in the method signature. We do some stuff with\n # it as though it partially implements the Mapping interface, but we're not\n # doing that stuff safely AT ALL.\n for key in other.keys():\n self.add(key, other[key])\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n @typing.overload\n def getlist(self, key: str) -> list[str]:\n ...\n\n @typing.overload\n def getlist(self, key: str, default: _DT) -> list[str] | _DT:\n ...\n\n def getlist(\n self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed\n ) -> list[str] | _DT:\n \"\"\"Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is _Sentinel.not_passed:\n # _DT is unbound; empty list is instance of List[str]\n return []\n # _DT is bound; default is instance of _DT\n return default\n else:\n # _DT may or may not be bound; vals[1:] is instance of List[str], which\n # meets our external interface requirement of `Union[List[str], _DT]`.\n return vals[1:]\n\n def _prepare_for_method_change(self) -> Self:\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({dict(self.itermerged())})\"\n\n def _copy_from(self, other: HTTPHeaderDict) -> None:\n for key in other:\n val = other.getlist(key)\n self._container[key.lower()] = [key, *val]\n\n def copy(self) -> HTTPHeaderDict:\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]\n return HTTPHeaderDictItemView(self)\n\n def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:\n if header_name in self:\n return potential_value in self._container[header_name.lower()][1:]\n return False\n\n def __ior__(self, other: object) -> HTTPHeaderDict:\n # Supports extending a header dict in-place using operator |=\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n self.extend(maybe_constructable)\n return self\n\n def __or__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator |\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = self.copy()\n result.extend(maybe_constructable)\n return result\n\n def __ror__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator | when other is on left side\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = type(self)(maybe_constructable)\n result.extend(self)\n return result" }, { "identifier": "RecentlyUsedContainer", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class RecentlyUsedContainer(typing.Generic[_KT, _VT], typing.MutableMapping[_KT, _VT]):\n \"\"\"\n Provides a 
thread-safe dict-like container which maintains up to\n ``maxsize`` keys while throwing away the least-recently-used keys beyond\n ``maxsize``.\n\n :param maxsize:\n Maximum number of recent elements to retain.\n\n :param dispose_func:\n Every time an item is evicted from the container,\n ``dispose_func(value)`` is called. Callback which will get called\n \"\"\"\n\n _container: typing.OrderedDict[_KT, _VT]\n _maxsize: int\n dispose_func: typing.Callable[[_VT], None] | None\n lock: RLock\n\n def __init__(\n self,\n maxsize: int = 10,\n dispose_func: typing.Callable[[_VT], None] | None = None,\n ) -> None:\n super().__init__()\n self._maxsize = maxsize\n self.dispose_func = dispose_func\n self._container = OrderedDict()\n self.lock = RLock()\n\n def __getitem__(self, key: _KT) -> _VT:\n # Re-insert the item, moving it to the end of the eviction line.\n with self.lock:\n item = self._container.pop(key)\n self._container[key] = item\n return item\n\n def __setitem__(self, key: _KT, value: _VT) -> None:\n evicted_item = None\n with self.lock:\n # Possibly evict the existing value of 'key'\n try:\n # If the key exists, we'll overwrite it, which won't change the\n # size of the pool. Because accessing a key should move it to\n # the end of the eviction line, we pop it out first.\n evicted_item = key, self._container.pop(key)\n self._container[key] = value\n except KeyError:\n # When the key does not exist, we insert the value first so that\n # evicting works in all cases, including when self._maxsize is 0\n self._container[key] = value\n if len(self._container) > self._maxsize:\n # If we didn't evict an existing value, and we've hit our maximum\n # size, then we have to evict the least recently used item from\n # the beginning of the container.\n evicted_item = self._container.popitem(last=False)\n\n # After releasing the lock on the pool, dispose of any evicted value.\n if evicted_item is not None and self.dispose_func:\n _, evicted_value = evicted_item\n self.dispose_func(evicted_value)\n\n def __delitem__(self, key: _KT) -> None:\n with self.lock:\n value = self._container.pop(key)\n\n if self.dispose_func:\n self.dispose_func(value)\n\n def __len__(self) -> int:\n with self.lock:\n return len(self._container)\n\n def __iter__(self) -> typing.NoReturn:\n raise NotImplementedError(\n \"Iteration over this class is unlikely to be threadsafe.\"\n )\n\n def clear(self) -> None:\n with self.lock:\n # Copy pointers to all values, then wipe the mapping\n values = list(self._container.values())\n self._container.clear()\n\n if self.dispose_func:\n for value in values:\n self.dispose_func(value)\n\n def keys(self) -> set[_KT]: # type: ignore[override]\n with self.lock:\n return set(self._container.keys())" }, { "identifier": "RequestMethods", "path": ".venv/Lib/site-packages/urllib3/_request_methods.py", "snippet": "class RequestMethods:\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it 
will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers: typing.Mapping[str, str] | None = None) -> None:\n self.headers = headers or {}\n\n def urlopen(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **kw: typing.Any,\n ) -> BaseHTTPResponse: # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n json: typing.Any | None = None,\n **urlopen_kw: typing.Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if json is not None and body is not None:\n raise TypeError(\n \"request got values for both 'body' and 'json' parameters which are mutually exclusive\"\n )\n\n if json is not None:\n if headers is None:\n headers = self.headers.copy() # type: ignore\n if not (\"content-type\" in map(str.lower, headers.keys())):\n headers[\"Content-Type\"] = \"application/json\" # type: ignore\n\n body = _json.dumps(json, separators=(\",\", \":\"), ensure_ascii=False).encode(\n \"utf-8\"\n )\n\n if body is not None:\n urlopen_kw[\"body\"] = body\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method,\n url,\n fields=fields, # type: ignore[arg-type]\n headers=headers,\n **urlopen_kw,\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(\n self,\n method: str,\n url: str,\n fields: _TYPE_ENCODE_URL_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method: str,\n url: str,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. 
This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": HTTPHeaderDict(headers)}\n body: bytes | str\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields), # type: ignore[arg-type]\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"].setdefault(\"Content-Type\", content_type)\n\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "ProxyConfig", "path": ".venv/Lib/site-packages/urllib3/connection.py", "snippet": " class BaseSSLError(BaseException): # type: ignore[no-redef]\nclass HTTPConnection(_HTTPConnection):\nclass HTTPSConnection(HTTPConnection):\nclass _WrappedAndVerifiedSocket(typing.NamedTuple):\nclass DummyConnection:\nRECENT_DATE = datetime.date(2022, 1, 1)\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n_HAS_SYS_AUDIT = hasattr(sys, \"audit\")\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n ) -> None:\n def host(self) -> str:\n def host(self, value: str) -> None:\n def _new_conn(self) -> socket.socket:\n def set_tunnel(\n self,\n host: str,\n port: int | None = None,\n headers: typing.Mapping[str, str] | None = None,\n scheme: str = \"http\",\n ) -> None:\n def connect(self) -> None:\n def is_closed(self) -> bool:\n def is_connected(self) -> bool:\n def has_connected_to_proxy(self) -> bool:\n def close(self) -> None:\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n def putheader(self, header: str, 
*values: str) -> None:\n def request( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n *,\n chunked: bool = False,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> None:\n def request_chunked(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n ) -> None:\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n cert_reqs: int | str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n server_hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_certs: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n ssl_version: int | str | None = None, # Deprecated\n cert_file: str | None = None,\n key_file: str | None = None,\n key_password: str | None = None,\n ) -> None:\n def set_cert(\n self,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ) -> None:\n def connect(self) -> None:\n def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:\ndef _ssl_wrap_socket_and_match_hostname(\n sock: socket.socket,\n *,\n cert_reqs: None | str | int,\n ssl_version: None | str | int,\n ssl_minimum_version: int | None,\n ssl_maximum_version: int | None,\n cert_file: str | None,\n key_file: str | None,\n key_password: str | None,\n ca_certs: str | None,\n ca_cert_dir: str | None,\n ca_cert_data: None | str | bytes,\n assert_hostname: None | str | Literal[False],\n assert_fingerprint: str | None,\n server_hostname: str | None,\n ssl_context: ssl.SSLContext | None,\n tls_in_tls: bool = False,\n) -> _WrappedAndVerifiedSocket:\ndef _match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n asserted_hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\ndef _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError:\ndef _get_default_user_agent() -> str:\ndef _url_from_connection(\n conn: HTTPConnection | HTTPSConnection, path: str | None = None\n) -> str:" }, { "identifier": "HTTPConnectionPool", "path": ".venv/Lib/site-packages/urllib3/connectionpool.py", "snippet": "_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None]\nclass ConnectionPool:\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\nclass HTTPSConnectionPool(HTTPConnectionPool):\n def __init__(self, host: str, port: int | None = None) -> None:\n def __str__(self) -> str:\n def __enter__(self: _SelfT) -> _SelfT:\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> Literal[False]:\n def close(self) -> 
None:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n _proxy_config: ProxyConfig | None = None,\n **conn_kw: typing.Any,\n ):\n def _new_conn(self) -> BaseHTTPConnection:\n def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection:\n def _put_conn(self, conn: BaseHTTPConnection | None) -> None:\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\n def _prepare_proxy(self, conn: BaseHTTPConnection) -> None:\n def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:\n def _raise_timeout(\n self,\n err: BaseSSLError | OSError | SocketTimeout,\n url: str,\n timeout_value: _TYPE_TIMEOUT | None,\n ) -> None:\n def _make_request(\n self,\n conn: BaseHTTPConnection,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | None = None,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n chunked: bool = False,\n response_conn: BaseHTTPConnection | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> BaseHTTPResponse:\n def close(self) -> None:\n def is_same_host(self, url: str) -> bool:\n def urlopen( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n redirect: bool = True,\n assert_same_host: bool = True,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n pool_timeout: int | None = None,\n release_conn: bool | None = None,\n chunked: bool = False,\n body_pos: _TYPE_BODY_POSITION | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n **response_kw: typing.Any,\n ) -> BaseHTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n ssl_version: int | str | None = None,\n ssl_minimum_version: ssl.TLSVersion | None = None,\n ssl_maximum_version: ssl.TLSVersion | None = None,\n assert_hostname: str | Literal[False] | None = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n **conn_kw: typing.Any,\n ) -> None:\n def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]\n def _new_conn(self) -> BaseHTTPSConnection:\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\ndef connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool:\ndef _normalize_host(host: None, scheme: str | None) -> None:\ndef _normalize_host(host: str, scheme: str | None) -> str:\ndef _normalize_host(host: str | None, scheme: str | None) -> str | None:\ndef _url_from_pool(\n pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None\n) -> str:\ndef _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None:" }, { "identifier": "LocationValueError", "path": 
".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"" }, { "identifier": "MaxRetryError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param str url: The requested Url\n :param reason: The underlying error\n :type reason: :class:`Exception`\n\n \"\"\"\n\n def __init__(\n self, pool: ConnectionPool, url: str, reason: Exception | None = None\n ) -> None:\n self.reason = reason\n\n message = f\"Max retries exceeded with url: {url} (Caused by {reason!r})\"\n\n super().__init__(pool, url, message)" }, { "identifier": "ProxySchemeUnknown", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):\n \"\"\"ProxyManager does not support the supplied scheme\"\"\"\n\n # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.\n\n def __init__(self, scheme: str | None) -> None:\n # 'localhost' is here because our URL parser parses\n # localhost:8080 -> scheme=localhost, remove if we fix this.\n if scheme == \"localhost\":\n scheme = None\n if scheme is None:\n message = \"Proxy URL had no scheme, should start with http:// or https://\"\n else:\n message = f\"Proxy URL had unsupported scheme {scheme}, should use http:// or https://\"\n super().__init__(message)" }, { "identifier": "URLSchemeUnknown", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class URLSchemeUnknown(LocationValueError):\n \"\"\"Raised when a URL input has an unsupported scheme.\"\"\"\n\n def __init__(self, scheme: str):\n message = f\"Not supported URL scheme {scheme}\"\n super().__init__(message)\n\n self.scheme = scheme" }, { "identifier": "BaseHTTPResponse", "path": ".venv/Lib/site-packages/urllib3/response.py", "snippet": "class BaseHTTPResponse(io.IOBase):\n CONTENT_DECODERS = [\"gzip\", \"x-gzip\", \"deflate\"]\n if brotli is not None:\n CONTENT_DECODERS += [\"br\"]\n if zstd is not None:\n CONTENT_DECODERS += [\"zstd\"]\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n DECODER_ERROR_CLASSES: tuple[type[Exception], ...] 
= (IOError, zlib.error)\n if brotli is not None:\n DECODER_ERROR_CLASSES += (brotli.error,)\n\n if zstd is not None:\n DECODER_ERROR_CLASSES += (zstd.ZstdError,)\n\n def __init__(\n self,\n *,\n headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,\n status: int,\n version: int,\n reason: str | None,\n decode_content: bool,\n request_url: str | None,\n retries: Retry | None = None,\n ) -> None:\n if isinstance(headers, HTTPHeaderDict):\n self.headers = headers\n else:\n self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]\n self.status = status\n self.version = version\n self.reason = reason\n self.decode_content = decode_content\n self._has_decoded_content = False\n self._request_url: str | None = request_url\n self.retries = retries\n\n self.chunked = False\n tr_enc = self.headers.get(\"transfer-encoding\", \"\").lower()\n # Don't incur the penalty of creating a list and then discarding it\n encodings = (enc.strip() for enc in tr_enc.split(\",\"))\n if \"chunked\" in encodings:\n self.chunked = True\n\n self._decoder: ContentDecoder | None = None\n\n def get_redirect_location(self) -> str | None | Literal[False]:\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. ``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get(\"location\")\n return False\n\n @property\n def data(self) -> bytes:\n raise NotImplementedError()\n\n def json(self) -> typing.Any:\n \"\"\"\n Parses the body of the HTTP response as JSON.\n\n To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.\n\n This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.\n\n Read more :ref:`here <json>`.\n \"\"\"\n data = self.data.decode(\"utf-8\")\n return _json.loads(data)\n\n @property\n def url(self) -> str | None:\n raise NotImplementedError()\n\n @url.setter\n def url(self, url: str | None) -> None:\n raise NotImplementedError()\n\n @property\n def connection(self) -> HTTPConnection | None:\n raise NotImplementedError()\n\n @property\n def retries(self) -> Retry | None:\n return self._retries\n\n @retries.setter\n def retries(self, retries: Retry | None) -> None:\n # Override the request_url if retries has a redirect location.\n if retries is not None and retries.history:\n self.url = retries.history[-1].redirect_location\n self._retries = retries\n\n def stream(\n self, amt: int | None = 2**16, decode_content: bool | None = None\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def read(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n cache_content: bool = False,\n ) -> bytes:\n raise NotImplementedError()\n\n def read_chunked(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def release_conn(self) -> None:\n raise NotImplementedError()\n\n def drain_conn(self) -> None:\n raise NotImplementedError()\n\n def close(self) -> None:\n raise NotImplementedError()\n\n def _init_decoder(self) -> None:\n \"\"\"\n Set-up the _decoder attribute if necessary.\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 7230\n # Section 3.2\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = 
_get_decoder(content_encoding)\n elif \",\" in content_encoding:\n encodings = [\n e.strip()\n for e in content_encoding.split(\",\")\n if e.strip() in self.CONTENT_DECODERS\n ]\n if encodings:\n self._decoder = _get_decoder(content_encoding)\n\n def _decode(\n self, data: bytes, decode_content: bool | None, flush_decoder: bool\n ) -> bytes:\n \"\"\"\n Decode the data passed in and potentially flush the decoder.\n \"\"\"\n if not decode_content:\n if self._has_decoded_content:\n raise RuntimeError(\n \"Calling read(decode_content=False) is not supported after \"\n \"read(decode_content=True) was called.\"\n )\n return data\n\n try:\n if self._decoder:\n data = self._decoder.decompress(data)\n self._has_decoded_content = True\n except self.DECODER_ERROR_CLASSES as e:\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding,\n e,\n ) from e\n if flush_decoder:\n data += self._flush_decoder()\n\n return data\n\n def _flush_decoder(self) -> bytes:\n \"\"\"\n Flushes the decoder. Should only be called if the decoder is actually\n being used.\n \"\"\"\n if self._decoder:\n return self._decoder.decompress(b\"\") + self._decoder.flush()\n return b\"\"\n\n # Compatibility methods for `io` module\n def readinto(self, b: bytearray) -> int:\n temp = self.read(len(b))\n if len(temp) == 0:\n return 0\n else:\n b[: len(temp)] = temp\n return len(temp)\n\n # Compatibility methods for http.client.HTTPResponse\n def getheaders(self) -> HTTPHeaderDict:\n warnings.warn(\n \"HTTPResponse.getheaders() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers\n\n def getheader(self, name: str, default: str | None = None) -> str | None:\n warnings.warn(\n \"HTTPResponse.getheader() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers.get(name, default)\n\n # Compatibility method for http.cookiejar\n def info(self) -> HTTPHeaderDict:\n return self.headers\n\n def geturl(self) -> str | None:\n return self.url" }, { "identifier": "_TYPE_SOCKET_OPTIONS", "path": ".venv/Lib/site-packages/urllib3/util/connection.py", "snippet": "_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]]" }, { "identifier": "connection_requires_http_tunnel", "path": ".venv/Lib/site-packages/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n destination_scheme: str | None = None,\n) -> bool:\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. 
(i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "Retry", "path": ".venv/Lib/site-packages/urllib3/util/retry.py", "snippet": "class Retry:\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool:\n\n .. code-block:: python\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request(\"GET\", \"https://example.com/\")\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=Retry(10))\n\n Retries can be disabled by passing ``False``:\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param Collection allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). 
See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``None`` value to retry on any verb.\n\n :param Collection status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of previous retries}))\n\n seconds. If `backoff_jitter` is non-zero, this sleep is extended by::\n\n random.uniform(0, {backoff jitter})\n\n seconds. For example, if the backoff_factor is 0.1, then :func:`Retry.sleep` will\n sleep for [0.0s, 0.2s, 0.4s, 0.8s, ...] between retries. No backoff will ever\n be longer than `backoff_max`.\n\n By default, backoff is disabled (factor set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param Collection remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Default maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n # Backward compatibility; assigned outside of the class.\n DEFAULT: typing.ClassVar[Retry]\n\n def __init__(\n self,\n total: bool | int | None = 10,\n connect: int | None = None,\n read: int | None = None,\n redirect: bool | int | None = None,\n status: int | None = None,\n other: int | None = None,\n allowed_methods: typing.Collection[str] | None = DEFAULT_ALLOWED_METHODS,\n status_forcelist: typing.Collection[int] | None = None,\n backoff_factor: float = 0,\n backoff_max: float = DEFAULT_BACKOFF_MAX,\n raise_on_redirect: bool = True,\n raise_on_status: bool = True,\n history: tuple[RequestHistory, ...] 
| None = None,\n respect_retry_after_header: bool = True,\n remove_headers_on_redirect: typing.Collection[\n str\n ] = DEFAULT_REMOVE_HEADERS_ON_REDIRECT,\n backoff_jitter: float = 0.0,\n ) -> None:\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.backoff_max = backoff_max\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or ()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n h.lower() for h in remove_headers_on_redirect\n )\n self.backoff_jitter = backoff_jitter\n\n def new(self, **kw: typing.Any) -> Retry:\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n allowed_methods=self.allowed_methods,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n backoff_max=self.backoff_max,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n backoff_jitter=self.backoff_jitter,\n )\n\n params.update(kw)\n return type(self)(**params) # type: ignore[arg-type]\n\n @classmethod\n def from_int(\n cls,\n retries: Retry | bool | int | None,\n redirect: bool | int | None = True,\n default: Retry | bool | int | None = None,\n ) -> Retry:\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self) -> float:\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n if self.backoff_jitter != 0.0:\n backoff_value += random.random() * self.backoff_jitter\n return float(max(0, min(self.backoff_max, backoff_value)))\n\n def parse_retry_after(self, retry_after: str) -> float:\n seconds: float\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(f\"Invalid Retry-After header: {retry_after}\")\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n seconds = max(seconds, 0)\n\n return seconds\n\n def get_retry_after(self, response: BaseHTTPResponse) -> float | None:\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return 
self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response: BaseHTTPResponse) -> bool:\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self) -> None:\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response: BaseHTTPResponse | None = None) -> None:\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err: Exception) -> bool:\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err: Exception) -> bool:\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method: str) -> bool:\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n if self.allowed_methods and method.upper() not in self.allowed_methods:\n return False\n return True\n\n def is_retry(\n self, method: str, status_code: int, has_retry_after: bool = False\n ) -> bool:\n \"\"\"Is this method/status code retryable? 
(Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return bool(\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self) -> bool:\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = [\n x\n for x in (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n if x\n ]\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method: str | None = None,\n url: str | None = None,\n response: BaseHTTPResponse | None = None,\n error: Exception | None = None,\n _pool: ConnectionPool | None = None,\n _stacktrace: TracebackType | None = None,\n ) -> Retry:\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.BaseHTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n elif error and self._is_read_error(error):\n # Read retry?\n if read is False or method is None or not self._is_method_retryable(method):\n raise reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n response_redirect_location = response.get_redirect_location()\n if response_redirect_location:\n redirect_location = response_redirect_location\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n reason = error or ResponseError(cause)\n raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, 
new_retry)\n\n return new_retry\n\n def __repr__(self) -> str:\n return (\n f\"{type(self).__name__}(total={self.total}, connect={self.connect}, \"\n f\"read={self.read}, redirect={self.redirect}, status={self.status})\"\n )" }, { "identifier": "Timeout", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "class Timeout:\n \"\"\"Timeout configuration.\n\n Timeouts can be defined as a default for a pool:\n\n .. code-block:: python\n\n import urllib3\n\n timeout = urllib3.util.Timeout(connect=2.0, read=7.0)\n\n http = urllib3.PoolManager(timeout=timeout)\n\n resp = http.request(\"GET\", \"https://example.com/\")\n\n print(resp.status)\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", timeout=Timeout(10))\n\n Timeouts can be disabled by setting all the parameters to ``None``:\n\n .. code-block:: python\n\n no_timeout = Timeout(connect=None, read=None)\n response = http.request(\"GET\", \"https://example.com/\", timeout=no_timeout)\n\n\n :param total:\n This combines the connect and read timeouts into one; the read timeout\n will be set to the time leftover from the connect attempt. In the\n event that both a connect timeout and a total are specified, or a read\n timeout and a total are specified, the shorter timeout will be applied.\n\n Defaults to None.\n\n :type total: int, float, or None\n\n :param connect:\n The maximum amount of time (in seconds) to wait for a connection\n attempt to a server to succeed. Omitting the parameter will default the\n connect timeout to the system default, probably `the global default\n timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout for connection attempts.\n\n :type connect: int, float, or None\n\n :param read:\n The maximum amount of time (in seconds) to wait between consecutive\n read operations for a response from the server. Omitting the parameter\n will default the read timeout to the system default, probably `the\n global default timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout.\n\n :type read: int, float, or None\n\n .. note::\n\n Many factors can affect the total amount of time for urllib3 to return\n an HTTP response.\n\n For example, Python's DNS resolver does not obey the timeout specified\n on the socket. Other factors that can affect total request time include\n high CPU load, high swap, the program running at a low priority level,\n or other behaviors.\n\n In addition, the read and total timeouts only measure the time between\n read operations on the socket connecting the client and the server,\n not the total amount of time for the request to return a complete\n response. For most requests, the timeout is raised because the server\n has not sent the first byte in the specified time. 
This is not always\n the case; if a server streams one byte every fifteen seconds, a timeout\n of 20 seconds will not trigger, even though the request will take\n several minutes to complete.\n\n If your goal is to cut off any request after a set amount of wall clock\n time, consider having a second \"watcher\" thread to cut off a slow\n request.\n \"\"\"\n\n #: A sentinel object representing the default timeout value\n DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT\n\n def __init__(\n self,\n total: _TYPE_TIMEOUT = None,\n connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n ) -> None:\n self._connect = self._validate_timeout(connect, \"connect\")\n self._read = self._validate_timeout(read, \"read\")\n self.total = self._validate_timeout(total, \"total\")\n self._start_connect: float | None = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})\"\n\n # __str__ provided for backwards compatibility\n __str__ = __repr__\n\n @staticmethod\n def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:\n return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout\n\n @classmethod\n def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:\n \"\"\"Check that a timeout attribute is valid.\n\n :param value: The timeout value to validate\n :param name: The name of the timeout attribute to validate. This is\n used to specify in error messages.\n :return: The validated and casted version of the given value.\n :raises ValueError: If it is a numeric value less than or equal to\n zero, or the type is not an integer, float, or None.\n \"\"\"\n if value is None or value is _DEFAULT_TIMEOUT:\n return value\n\n if isinstance(value, bool):\n raise ValueError(\n \"Timeout cannot be a boolean value. It must \"\n \"be an int, float or None.\"\n )\n try:\n float(value)\n except (TypeError, ValueError):\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n try:\n if value <= 0:\n raise ValueError(\n \"Attempted to set %s timeout to %s, but the \"\n \"timeout cannot be set to a value less \"\n \"than or equal to 0.\" % (name, value)\n )\n except TypeError:\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n return value\n\n @classmethod\n def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Create a new Timeout from a legacy timeout value.\n\n The timeout value used by httplib.py sets the same timeout on the\n connect(), and recv() socket requests. 
This creates a :class:`Timeout`\n object that sets the individual timeouts to the ``timeout`` value\n passed to this function.\n\n :param timeout: The legacy timeout value.\n :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None\n :return: Timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n return Timeout(read=timeout, connect=timeout)\n\n def clone(self) -> Timeout:\n \"\"\"Create a copy of the timeout object\n\n Timeout properties are stored per-pool but each request needs a fresh\n Timeout object to ensure each one has its own start/stop configured.\n\n :return: a copy of the timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n # We can't use copy.deepcopy because that will also create a new object\n # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to\n # detect the user default.\n return Timeout(connect=self._connect, read=self._read, total=self.total)\n\n def start_connect(self) -> float:\n \"\"\"Start the timeout clock, used during a connect() attempt\n\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to start a timer that has been started already.\n \"\"\"\n if self._start_connect is not None:\n raise TimeoutStateError(\"Timeout timer has already been started.\")\n self._start_connect = time.monotonic()\n return self._start_connect\n\n def get_connect_duration(self) -> float:\n \"\"\"Gets the time elapsed since the call to :meth:`start_connect`.\n\n :return: Elapsed time in seconds.\n :rtype: float\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to get duration for a timer that hasn't been started.\n \"\"\"\n if self._start_connect is None:\n raise TimeoutStateError(\n \"Can't get connect duration for timer that has not started.\"\n )\n return time.monotonic() - self._start_connect\n\n @property\n def connect_timeout(self) -> _TYPE_TIMEOUT:\n \"\"\"Get the value to use when setting a connection timeout.\n\n This will be a positive float or integer, the value None\n (never timeout), or the default system timeout.\n\n :return: Connect timeout.\n :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None\n \"\"\"\n if self.total is None:\n return self._connect\n\n if self._connect is None or self._connect is _DEFAULT_TIMEOUT:\n return self.total\n\n return min(self._connect, self.total) # type: ignore[type-var]\n\n @property\n def read_timeout(self) -> float | None:\n \"\"\"Get the value for the read timeout.\n\n This assumes some time has elapsed in the connection timeout and\n computes the read timeout appropriately.\n\n If self.total is set, the read timeout is dependent on the amount of\n time taken by the connect timeout. 
If the connection time has not been\n established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be\n raised.\n\n :return: Value to use for the read timeout.\n :rtype: int, float or None\n :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`\n has not yet been called on this object.\n \"\"\"\n if (\n self.total is not None\n and self.total is not _DEFAULT_TIMEOUT\n and self._read is not None\n and self._read is not _DEFAULT_TIMEOUT\n ):\n # In case the connect timeout has not yet been established.\n if self._start_connect is None:\n return self._read\n return max(0, min(self.total - self.get_connect_duration(), self._read))\n elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:\n return max(0, self.total - self.get_connect_duration())\n else:\n return self.resolve_default_timeout(self._read)" }, { "identifier": "Url", "path": ".venv/Lib/site-packages/urllib3/util/url.py", "snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. 
code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url" }, { "identifier": "parse_url", "path": ".venv/Lib/site-packages/urllib3/util/url.py", "snippet": "def parse_url(url: str) -> Url:\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urllib.parse`.\n\n Example:\n\n .. code-block:: python\n\n import urllib3\n\n print( urllib3.util.parse_url('http://google.com/mail/'))\n # Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n\n print( urllib3.util.parse_url('google.com:80'))\n # Url(scheme=None, host='google.com', port=80, path=None, ...)\n\n print( urllib3.util.parse_url('/foo?bar'))\n # Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not _SCHEME_RE.search(url):\n url = \"//\" + url\n\n scheme: str | None\n authority: str | None\n auth: str | None\n host: str | None\n port: str | None\n port_int: int | None\n path: str | None\n query: str | None\n fragment: str | None\n\n try:\n scheme, authority, path, query, fragment = _URI_RE.match(url).groups() # type: ignore[union-attr]\n normalize_uri = scheme is None or scheme.lower() in _NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups() # type: ignore[union-attr]\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, _USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port_int = int(port)\n if not (0 <= port_int <= 65535):\n raise LocationParseError(url)\n else:\n port_int = None\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, _PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, _QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, _FRAGMENT_CHARS)\n\n except (ValueError, AttributeError) as e:\n raise LocationParseError(source_url) from e\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not 
path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n return Url(\n scheme=scheme,\n auth=auth,\n host=host,\n port=port_int,\n path=path,\n query=query,\n fragment=fragment,\n )" } ]
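The Retry and Timeout snippets in the context list above document their intended pool-level usage. The following is a minimal sketch of that documented usage, assuming urllib3 v2.x is installed and that https://example.com is reachable (both are assumptions, not part of this record):

import urllib3
from urllib3.util.retry import Retry
from urllib3.util.timeout import Timeout

# Pool-wide defaults, as the Retry/Timeout docstrings describe: retry up to
# 5 connect errors, 2 read errors and 5 redirects with exponential backoff,
# and give every request a 2 s connect / 7 s read budget.
retries = Retry(connect=5, read=2, redirect=5, backoff_factor=0.1)
timeout = Timeout(connect=2.0, read=7.0)

http = urllib3.PoolManager(retries=retries, timeout=timeout)
resp = http.request("GET", "https://example.com/")
print(resp.status)

# A per-request value overrides the pool default, e.g. disabling retries:
resp = http.request("GET", "https://example.com/", retries=False)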
import functools import logging import typing import warnings import ssl from types import TracebackType from urllib.parse import urljoin from ._collections import HTTPHeaderDict, RecentlyUsedContainer from ._request_methods import RequestMethods from .connection import ProxyConfig from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, URLSchemeUnknown, ) from .response import BaseHTTPResponse from .util.connection import _TYPE_SOCKET_OPTIONS from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.timeout import Timeout from .util.url import Url, parse_url from typing import Literal
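Among the imports listed above, parse_url and connection_requires_http_tunnel are both documented in the context snippets. The small illustration below follows the tunnelling rule those snippets spell out; it assumes urllib3 v2.x, and note that urllib3.util.proxy is an internal module, so this is for illustration only:

from urllib3.util.url import parse_url
from urllib3.util.proxy import connection_requires_http_tunnel

proxy = parse_url("http://proxy.internal:3128")  # hypothetical proxy address

# No proxy at all: nothing to tunnel through.
print(connection_requires_http_tunnel(None, None, "https"))   # False
# Plain-HTTP destination: always forwarded, never tunnelled.
print(connection_requires_http_tunnel(proxy, None, "http"))   # False
# HTTPS destination through a proxy: requires an HTTP CONNECT tunnel.
print(connection_requires_http_tunnel(proxy, None, "https"))  # True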
20,175
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ca_cert_data", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ca_cert_data: str | bytes | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None key__proxy: Url | None key__proxy_headers: frozenset[tuple[str, str]] | None
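The PoolKey NamedTuple in the cropped code above exists so that connection pools can be cached under a hashable key. The sketch below is hypothetical and simplified (the field subset and the make_key helper are illustrative, not urllib3's real normalizer); it shows why case-insensitive fields are lower-cased and why mutable values such as header dicts must be frozen before hashing:

import typing

class DemoPoolKey(typing.NamedTuple):
    key_scheme: str
    key_host: str
    key_port: typing.Optional[int]
    key_headers: typing.Optional[frozenset]

def make_key(context: dict) -> DemoPoolKey:
    headers = context.get("headers")
    return DemoPoolKey(
        key_scheme=context["scheme"].lower(),   # scheme is case-insensitive
        key_host=context["host"].lower(),       # so is the host name
        key_port=context.get("port"),
        # dicts are unhashable; freeze headers into a frozenset of pairs
        key_headers=frozenset(headers.items()) if headers else None,
    )

key_a = make_key({"scheme": "HTTPS", "host": "Example.COM", "port": 443,
                  "headers": {"User-Agent": "demo"}})
key_b = make_key({"scheme": "https", "host": "example.com", "port": 443,
                  "headers": {"User-Agent": "demo"}})
print(key_a == key_b)  # True: both normalize to the same hashable key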
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ca_cert_data", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ca_cert_data: str | bytes | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None key__proxy: Url | None key__proxy_headers: frozenset[tuple[str, str]] | None
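The SSL_KEYWORDS tuple near the top of the code above marks which keyword arguments are TLS-related. A hypothetical sketch of how such a tuple can be used to partition a mixed kwargs dict (the split_ssl_kwargs helper and the reduced keyword list are illustrative, not urllib3's actual code path):

SSL_KEYWORDS = ("key_file", "cert_file", "cert_reqs", "ca_certs",
                "ssl_context", "key_password", "server_hostname")

def split_ssl_kwargs(kwargs: dict) -> tuple:
    """Separate TLS-related options from everything else."""
    ssl_kwargs = {k: v for k, v in kwargs.items() if k in SSL_KEYWORDS}
    other_kwargs = {k: v for k, v in kwargs.items() if k not in SSL_KEYWORDS}
    return ssl_kwargs, other_kwargs

print(split_ssl_kwargs({"ca_certs": "/etc/ssl/ca.pem", "maxsize": 10}))
# ({'ca_certs': '/etc/ssl/ca.pem'}, {'maxsize': 10})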
key__proxy_config: ProxyConfig | None
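The next_line field above ("key__proxy_config: ProxyConfig | None") is the line a completion model is expected to produce after the cropped code. A hypothetical scoring sketch follows; this is not the dataset's official evaluation, and the whitespace-insensitive comparison is an assumption:

def exact_match(predicted: str, reference: str) -> bool:
    # Treat indentation-only differences as correct.
    return predicted.strip() == reference.strip()

reference = "key__proxy_config: ProxyConfig | None"
print(exact_match("    key__proxy_config: ProxyConfig | None", reference))  # True
print(exact_match("key__proxy_headers: ...", reference))                    # False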
3
2023-12-16 04:12:01+00:00
24k
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n text -- text to be displayed\n header -- flag to indicate this cell is a header cell.\n \"\"\"\n self.text = text\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell.\"\"\"\n if self.header:\n return '<th>%s</th>' % (self.text)\n else:\n return '<td>%s</td>' % (self.text)" }, { "identifier": "SimpleTableImage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableImage(object):\n \"\"\"A table class to create table cells with an image.\n\n Example:\n cell = SimpleTableImage('images/image_1.jpg')\n \"\"\"\n\n def __init__(self, image_file, width=None, height=None):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n image_file -- relative filepath to image file to display.\n width -- (optional) width of the image in pixels\n height -- (optional) height of the image in pixels\n \"\"\"\n self.image_file = image_file\n if width:\n self.width = round(width)\n else:\n self.width = width\n if height:\n self.height = round(height)\n else:\n self.height = height\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell with the image.\"\"\"\n safe_filename = quote(self.image_file)\n output = '<a href=\"%s\" target=\"_blank\">' % (safe_filename)\n output += '<img src=\"%s\"' % (safe_filename)\n if self.height:\n output += ' height=\"%s\"' % (self.height)\n if self.width:\n output += ' width=\"%s\"' % (self.width)\n output += '></a>'\n\n return output" }, { "identifier": "SimpleTableRow", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableRow(object):\n \"\"\"A table class to create table rows, populated by table cells.\n\n Example:\n # Row from list\n row = SimpleTableRow(['Hello,', 'world!'])\n\n # Row from SimpleTableCell\n cell1 = SimpleTableCell('Hello,')\n cell2 = SimpleTableCell('world!')\n row = SimpleTableRow([cell1, cell2])\n \"\"\"\n\n def __init__(self, cells=None, header=False):\n \"\"\"Table row constructor.\n\n Keyword arguments:\n cells -- iterable of SimpleTableCell (default None)\n header -- flag to indicate this row is a header row.\n if the cells are SimpleTableCell, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n \"\"\"\n cells = cells or []\n if isinstance(cells[0], SimpleTableCell):\n self.cells = cells\n else:\n self.cells = [SimpleTableCell(cell, header=header) for cell in cells]\n\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table row and its cells as a string.\"\"\"\n row = []\n\n row.append('<tr>')\n\n for cell in self.cells:\n row.append(str(cell))\n\n row.append('</tr>')\n\n return '\\n'.join(row)\n\n def __iter__(self):\n \"\"\"Iterate through row cells\"\"\"\n for cell in self.cells:\n yield cell\n\n def add_cell(self, cell):\n \"\"\"Add a SimpleTableCell object to the list of cells.\"\"\"\n self.cells.append(cell)\n\n def add_cells(self, cells):\n \"\"\"Add a list of SimpleTableCell objects to the list of cells.\"\"\"\n for cell in cells:\n self.cells.append(cell)" }, { "identifier": "SimpleTable", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTable(object):\n 
\"\"\"A table class to create HTML tables, populated by HTML table rows.\n\n Example:\n # Table from lists\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']])\n\n # Table with header row\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']],\n header_row=['Header1', 'Header2', 'Header3'])\n\n # Table from SimpleTableRow\n rows = SimpleTableRow(['Hello,', 'world!'])\n table = SimpleTable(rows)\n \"\"\"\n\n def __init__(self, rows=None, header_row=None, css_class=None):\n \"\"\"Table constructor.\n\n Keyword arguments:\n rows -- iterable of SimpleTableRow\n header_row -- row that will be displayed at the beginning of the table.\n if this row is SimpleTableRow, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n css_class -- table CSS class\n \"\"\"\n rows = rows or []\n if isinstance(rows[0], SimpleTableRow):\n self.rows = rows\n else:\n self.rows = [SimpleTableRow(row) for row in rows]\n\n if header_row is None:\n self.header_row = None\n elif isinstance(header_row, SimpleTableRow):\n self.header_row = header_row\n else:\n self.header_row = SimpleTableRow(header_row, header=True)\n\n self.css_class = css_class\n\n def __str__(self):\n \"\"\"Return the HTML code for the table as a string.\"\"\"\n table = []\n\n if self.css_class:\n table.append('<table class=%s>' % self.css_class)\n else:\n table.append('<table>')\n\n if self.header_row:\n table.append(str(self.header_row))\n\n for row in self.rows:\n table.append(str(row))\n\n table.append('</table>')\n\n return '\\n'.join(table)\n\n def __iter__(self):\n \"\"\"Iterate through table rows\"\"\"\n for row in self.rows:\n yield row\n\n def add_row(self, row):\n \"\"\"Add a SimpleTableRow object to the list of rows.\"\"\"\n self.rows.append(row)\n\n def add_rows(self, rows):\n \"\"\"Add a list of SimpleTableRow objects to the list of rows.\"\"\"\n for row in rows:\n self.rows.append(row)" }, { "identifier": "HTMLPage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class HTMLPage(object):\n \"\"\"A class to create HTML pages containing CSS and tables.\"\"\"\n\n def __init__(self, tables=None, css=None, encoding=\"utf-8\"):\n \"\"\"HTML page constructor.\n\n Keyword arguments:\n tables -- List of SimpleTable objects\n css -- Cascading Style Sheet specification that is appended before the\n table string\n encoding -- Characters encoding. 
Default: UTF-8\n \"\"\"\n self.tables = tables or []\n self.css = css\n self.encoding = encoding\n\n def __str__(self):\n \"\"\"Return the HTML page as a string.\"\"\"\n page = []\n\n if self.css:\n page.append('<style type=\"text/css\">\\n%s\\n</style>' % self.css)\n\n # Set encoding\n page.append('<meta http-equiv=\"Content-Type\" content=\"text/html;'\n 'charset=%s\">' % self.encoding)\n\n for table in self.tables:\n page.append(str(table))\n page.append('<br />')\n\n return '\\n'.join(page)\n\n def __iter__(self):\n \"\"\"Iterate through tables\"\"\"\n for table in self.tables:\n yield table\n\n def save(self, filename):\n \"\"\"Save HTML page to a file using the proper encoding\"\"\"\n with codecs.open(filename, 'w', self.encoding) as outfile:\n for line in str(self):\n outfile.write(line)\n\n def add_table(self, table):\n \"\"\"Add a SimpleTable to the page list of tables\"\"\"\n self.tables.append(table)" }, { "identifier": "tqdm", "path": "backend/scenedetect/platform.py", "snippet": "class FakeTqdmObject:\nclass FakeTqdmLoggingRedirect:\nclass CommandTooLong(Exception):\nclass Template(string.Template):\n def __init__(self, **kawrgs):\n def update(self, n=1):\n def close(self):\n def set_description(self, desc=None, refresh=True):\n def __init__(self, **kawrgs):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\ndef get_cv2_imwrite_params() -> Dict[str, Union[int, None]]:\n def _get_cv2_param(param_name: str) -> Union[int, None]:\ndef get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\ndef get_and_create_path(file_path: AnyStr, output_directory: Optional[AnyStr] = None) -> AnyStr:\ndef init_logger(log_level: int = logging.INFO,\n show_stdout: bool = False,\n log_file: Optional[str] = None):\ndef invoke_command(args: List[str]) -> int:\ndef get_ffmpeg_path() -> Optional[str]:\ndef get_ffmpeg_version() -> Optional[str]:\ndef get_mkvmerge_version() -> Optional[str]:\ndef get_system_version_info() -> str:\n INFO_TEMPLATE = '[PySceneDetect] %(message)s'\n DEBUG_TEMPLATE = '%(levelname)s: %(module)s.%(funcName)s(): %(message)s'" }, { "identifier": "FrameTimecode", "path": "backend/scenedetect/frame_timecode.py", "snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. 
Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n \"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 
frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == 
self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. 
Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num" }, { "identifier": "VideoStream", "path": "backend/scenedetect/video_stream.py", "snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. \"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. 
If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError" }, { "identifier": "SceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SceneDetector:\n \"\"\" Base class to inherit from when implementing a scene detection algorithm.\n\n This API is not yet stable and subject to change.\n\n This represents a \"dense\" scene detector, which returns a list of frames where\n the next scene/shot begins in a video.\n\n Also see the implemented scene detectors in the scenedetect.detectors module\n to get an idea of how a particular detector can be created.\n \"\"\"\n # TODO(v0.7): Make this a proper abstract base class.\n\n stats_manager: Optional[StatsManager] = None\n \"\"\"Optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to\n use for caching frame metrics to and from.\"\"\"\n\n # TODO(v1.0): Remove - this is a rarely used case for what is now a neglegible performance gain.\n def is_processing_required(self, frame_num: int) -> bool:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Test if all calculations for a given frame are already done.\n\n Returns:\n False if the SceneDetector has assigned _metric_keys, and the\n stats_manager property is set to a valid StatsManager object containing\n the required frame metrics/calculations for the given frame - thus, not\n needing the frame to perform scene detection.\n\n True otherwise (i.e. 
the frame_img passed to process_frame is required\n to be passed to process_frame for the given frame_num).\n \"\"\"\n metric_keys = self.get_metrics()\n return not metric_keys or not (self.stats_manager is not None\n and self.stats_manager.metrics_exist(frame_num, metric_keys))\n\n def stats_manager_required(self) -> bool:\n \"\"\"Stats Manager Required: Prototype indicating if detector requires stats.\n\n Returns:\n True if a StatsManager is required for the detector, False otherwise.\n \"\"\"\n return False\n\n def get_metrics(self) -> List[str]:\n \"\"\"Get Metrics: Get a list of all metric names/keys used by the detector.\n\n Returns:\n List of strings of frame metric key names that will be used by\n the detector when a StatsManager is passed to process_frame.\n \"\"\"\n return []\n\n def process_frame(self, frame_num: int, frame_img: Optional[numpy.ndarray]) -> List[int]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[int]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n @property\n def event_buffer_length(self) -> int:\n \"\"\"The amount of frames a given event can be buffered for, in time. Represents maximum\n amount any event can be behind `frame_number` in the result of :meth:`process_frame`.\n \"\"\"\n return 0" }, { "identifier": "SparseSceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SparseSceneDetector(SceneDetector):\n \"\"\"Base class to inherit from when implementing a sparse scene detection algorithm.\n\n This class will be removed in v1.0 and should not be used.\n\n Unlike dense detectors, sparse detectors scene_detect \"events\" and return a *pair* of frames,\n as opposed to just a single cut.\n\n An example of a SparseSceneDetector is the MotionDetector.\n \"\"\"\n\n def process_frame(self, frame_num: int, frame_img: numpy.ndarray) -> List[Tuple[int, int]]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[Tuple[int, int]]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []" }, { "identifier": "StatsManager", "path": "backend/scenedetect/stats_manager.py", "snippet": "class StatsManager:\n \"\"\"Provides a key-value store for frame metrics/calculations which can be used\n for two-pass detection algorithms, as well as saving stats to a CSV file.\n\n Analyzing a statistics CSV file is also very useful for finding the optimal\n algorithm parameters for certain detection methods. Additionally, the data\n may be plotted by a graphing module (e.g. matplotlib) by obtaining the\n metric of interest for a series of frames by iteratively calling get_metrics(),\n after having called the detect_scenes(...) 
method on the SceneManager object\n which owns the given StatsManager instance.\n\n Only metrics consisting of `float` or `int` should be used currently.\n \"\"\"\n\n def __init__(self, base_timecode: FrameTimecode = None):\n \"\"\"Initialize a new StatsManager.\n\n Arguments:\n base_timecode: Timecode associated with this object. Must not be None (default value\n will be removed in a future release).\n \"\"\"\n # Frame metrics is a dict of frame (int): metric_dict (Dict[str, float])\n # of each frame metric key and the value it represents (usually float).\n self._frame_metrics: Dict[FrameTimecode, Dict[str, float]] = dict()\n self._registered_metrics: Set[str] = set() # Set of frame metric keys.\n self._loaded_metrics: Set[str] = set() # Metric keys loaded from stats file.\n self._metrics_updated: bool = False # Flag indicating if metrics require saving.\n self._base_timecode: Optional[FrameTimecode] = base_timecode # Used for timing calculations.\n\n def register_metrics(self, metric_keys: Iterable[str]) -> None:\n \"\"\"Register a list of metric keys that will be used by the detector.\n\n Used to ensure that multiple detector keys don't overlap.\n\n Raises:\n FrameMetricRegistered: A particular metric_key has already been registered/added\n to the StatsManager. Only if the StatsManager is being used for read-only\n access (i.e. all frames in the video have already been processed for the given\n metric_key in the exception) is this behavior desirable.\n \"\"\"\n for metric_key in metric_keys:\n if metric_key not in self._registered_metrics:\n self._registered_metrics.add(metric_key)\n else:\n raise FrameMetricRegistered(metric_key)\n\n # TODO(v1.0): Change frame_number to a FrameTimecode now that it is just a hash and will\n # be required for VFR support.\n def get_metrics(self, frame_number: int, metric_keys: Iterable[str]) -> List[Any]:\n \"\"\"Return the requested statistics/metrics for a given frame.\n\n Arguments:\n frame_number (int): Frame number to retrieve metrics for.\n metric_keys (List[str]): A list of metric keys to look up.\n\n Returns:\n A list containing the requested frame metrics for the given frame number\n in the same order as the input list of metric keys. 
If a metric could\n not be found, None is returned for that particular metric.\n \"\"\"\n return [self._get_metric(frame_number, metric_key) for metric_key in metric_keys]\n\n def set_metrics(self, frame_number: int, metric_kv_dict: Dict[str, Any]) -> None:\n \"\"\" Set Metrics: Sets the provided statistics/metrics for a given frame.\n\n Arguments:\n frame_number: Frame number to retrieve metrics for.\n metric_kv_dict: A dict mapping metric keys to the\n respective integer/floating-point metric values to set.\n \"\"\"\n for metric_key in metric_kv_dict:\n self._set_metric(frame_number, metric_key, metric_kv_dict[metric_key])\n\n def metrics_exist(self, frame_number: int, metric_keys: Iterable[str]) -> bool:\n \"\"\" Metrics Exist: Checks if the given metrics/stats exist for the given frame.\n\n Returns:\n bool: True if the given metric keys exist for the frame, False otherwise.\n \"\"\"\n return all([self._metric_exists(frame_number, metric_key) for metric_key in metric_keys])\n\n def is_save_required(self) -> bool:\n \"\"\" Is Save Required: Checks if the stats have been updated since loading.\n\n Returns:\n bool: True if there are frame metrics/statistics not yet written to disk,\n False otherwise.\n \"\"\"\n return self._metrics_updated\n\n def save_to_csv(self,\n csv_file: Union[str, bytes, TextIO],\n base_timecode: Optional[FrameTimecode] = None,\n force_save=True) -> None:\n \"\"\" Save To CSV: Saves all frame metrics stored in the StatsManager to a CSV file.\n\n Arguments:\n csv_file: A file handle opened in write mode (e.g. open('...', 'w')) or a path as str.\n base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility.\n force_save: If True, writes metrics out even if an update is not required.\n\n Raises:\n OSError: If `path` cannot be opened or a write failure occurs.\n \"\"\"\n # TODO(v0.7): Replace with DeprecationWarning that `base_timecode` will be removed in v0.8.\n if base_timecode is not None:\n logger.error('base_timecode is deprecated.')\n\n # Ensure we need to write to the file, and that we have data to do so with.\n if not ((self.is_save_required() or force_save) and self._registered_metrics\n and self._frame_metrics):\n logger.info(\"No metrics to save.\")\n return\n\n assert self._base_timecode is not None\n\n # If we get a path instead of an open file handle, recursively call ourselves\n # again but with file handle instead of path.\n if isinstance(csv_file, (str, bytes)):\n with open(csv_file, 'w') as file:\n self.save_to_csv(csv_file=file, force_save=force_save)\n return\n\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n metric_keys = sorted(list(self._registered_metrics.union(self._loaded_metrics)))\n csv_writer.writerow([COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE] + metric_keys)\n frame_keys = sorted(self._frame_metrics.keys())\n logger.info(\"Writing %d frames to CSV...\", len(frame_keys))\n for frame_key in frame_keys:\n frame_timecode = self._base_timecode + frame_key\n csv_writer.writerow(\n [frame_timecode.get_frames() +\n 1, frame_timecode.get_timecode()] +\n [str(metric) for metric in self.get_metrics(frame_key, metric_keys)])\n\n @staticmethod\n def valid_header(row: List[str]) -> bool:\n \"\"\"Check that the given CSV row is a valid header for a statsfile.\n\n Arguments:\n row: A row decoded from the CSV reader.\n\n Returns:\n True if `row` is a valid statsfile header, False otherwise.\n \"\"\"\n if not row or not len(row) >= 2:\n return False\n if row[0] != COLUMN_NAME_FRAME_NUMBER or row[1] != COLUMN_NAME_TIMECODE:\n return 
False\n return True\n\n # TODO(v1.0): Remove.\n def load_from_csv(self, csv_file: Union[str, bytes, TextIO]) -> Optional[int]:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Load all metrics stored in a CSV file into the StatsManager instance. Will be removed in a\n future release after becoming a no-op.\n\n Arguments:\n csv_file: A file handle opened in read mode (e.g. open('...', 'r')) or a path as str.\n\n Returns:\n int or None: Number of frames/rows read from the CSV file, or None if the\n input file was blank or could not be found.\n\n Raises:\n StatsFileCorrupt: Stats file is corrupt and can't be loaded, or wrong file\n was specified.\n \"\"\"\n # TODO: Make this an error, then make load_from_csv() a no-op, and finally, remove it.\n logger.warning(\"load_from_csv() is deprecated and will be removed in a future release.\")\n\n # If we get a path instead of an open file handle, check that it exists, and if so,\n # recursively call ourselves again but with file set instead of path.\n if isinstance(csv_file, (str, bytes)):\n if os.path.exists(csv_file):\n with open(csv_file, 'r') as file:\n return self.load_from_csv(csv_file=file)\n # Path doesn't exist.\n return None\n\n # If we get here, file is a valid file handle in read-only text mode.\n csv_reader = csv.reader(csv_file, lineterminator='\\n')\n num_cols = None\n num_metrics = None\n num_frames = None\n # First Row: Frame Num, Timecode, [metrics...]\n try:\n row = next(csv_reader)\n # Backwards compatibility for previous versions of statsfile\n # which included an additional header row.\n if not self.valid_header(row):\n row = next(csv_reader)\n except StopIteration:\n # If the file is blank or we couldn't decode anything, assume the file was empty.\n return None\n if not self.valid_header(row):\n raise StatsFileCorrupt()\n num_cols = len(row)\n num_metrics = num_cols - 2\n if not num_metrics > 0:\n raise StatsFileCorrupt('No metrics defined in CSV file.')\n self._loaded_metrics = row[2:]\n num_frames = 0\n for row in csv_reader:\n metric_dict = {}\n if not len(row) == num_cols:\n raise StatsFileCorrupt('Wrong number of columns detected in stats file row.')\n for i, metric_str in enumerate(row[2:]):\n if metric_str and metric_str != 'None':\n try:\n metric_dict[self._loaded_metrics[i]] = float(metric_str)\n except ValueError:\n raise StatsFileCorrupt('Corrupted value in stats file: %s' %\n metric_str) from ValueError\n frame_number = int(row[0])\n # Switch from 1-based to 0-based frame numbers.\n if frame_number > 0:\n frame_number -= 1\n self.set_metrics(frame_number, metric_dict)\n num_frames += 1\n logger.info('Loaded %d metrics for %d frames.', num_metrics, num_frames)\n self._metrics_updated = False\n return num_frames\n\n def _get_metric(self, frame_number: int, metric_key: str) -> Optional[Any]:\n if self._metric_exists(frame_number, metric_key):\n return self._frame_metrics[frame_number][metric_key]\n return None\n\n def _set_metric(self, frame_number: int, metric_key: str, metric_value: Any) -> None:\n self._metrics_updated = True\n if not frame_number in self._frame_metrics:\n self._frame_metrics[frame_number] = dict()\n self._frame_metrics[frame_number][metric_key] = metric_value\n\n def _metric_exists(self, frame_number: int, metric_key: str) -> bool:\n return (frame_number in self._frame_metrics\n and metric_key in self._frame_metrics[frame_number])" }, { "identifier": "FrameMetricRegistered", "path": "backend/scenedetect/stats_manager.py", "snippet": "class FrameMetricRegistered(Exception):\n \"\"\" Raised when attempting to 
register a frame metric key which has\n already been registered. \"\"\"\n\n def __init__(self,\n metric_key: str,\n message: str = \"Attempted to re-register frame metric key.\"):\n super().__init__(message)\n self.metric_key = metric_key" } ]
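The FrameTimecode snippets above spell out how timecode strings are parsed and how arithmetic and comparisons behave; the following is an illustrative sketch only, not part of the dataset record (it assumes the backend.scenedetect layout used in the import block below, and exact frame counts depend on the _seconds_to_frames rounding, which is not shown here):

from backend.scenedetect.frame_timecode import FrameTimecode

# 'HH:MM:SS[.nnn]' strings go through _parse_timecode_string; 90.5 s at 30 FPS.
tc = FrameTimecode(timecode="00:01:30.500", fps=30.0)
print(tc.get_frames())      # ~2715 frames (exact value depends on _seconds_to_frames)
print(tc + 30)              # int operand: add 30 frames
print(tc + 1.0)             # float operand: add 1.0 second worth of frames
print(tc - "00:02:00")      # result clamps at frame 0, per __isub__
# Adding or comparing two FrameTimecode objects requires equal framerates,
# otherwise ValueError / TypeError is raised as shown in the snippet above.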
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14,568
while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress: progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True) filename_template = Template(image_name_template) scene_num_format = '%0' scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd' image_num_format = '%0' image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd' framerate = scene_list[0][0].framerate # TODO(v1.0): Split up into multiple sub-expressions so auto-formatter works correctly. timecode_list = [ [ FrameTimecode(int(f), fps=framerate) for f in [ # middle frames a[len(a) // 2] if (0 < j < num_images - 1) or num_images == 1 # first frame else min(a[0] + frame_margin, a[-1]) if j == 0 # last frame else max(a[-1] - frame_margin, a[0]) # for each evenly-split array of frames in the scene list for j, a in enumerate(np.array_split(r, num_images)) ] ] for i, r in enumerate([ # pad ranges to number of images r if 1 + r[-1] - r[0] >= num_images else list(r) + [r[-1]] * (num_images - len(r)) # create range of frames in scene for r in ( range( start.get_frames(), start.get_frames() + max( 1, # guard against zero length scenes end.get_frames() - start.get_frames())) # for each scene in scene list for start, end in scene_list) ]) ] image_filenames = {i: [] for i in range(len(timecode_list))} aspect_ratio = video.aspect_ratio if abs(aspect_ratio - 1.0) < 0.01: aspect_ratio = None logger.debug('Writing images with template %s', filename_template.template) for i, scene_timecodes in enumerate(timecode_list): for j, image_timecode in enumerate(scene_timecodes): video.seek(image_timecode) frame_im = video.read() if frame_im is not None: # TODO: Allow NUM to be a valid suffix in addition to NUMBER. file_path = '%s.%s' % (filename_template.safe_substitute( VIDEO_NAME=video.name, SCENE_NUMBER=scene_num_format % (i + 1), IMAGE_NUMBER=image_num_format % (j + 1), FRAME_NUMBER=image_timecode.get_frames()), image_extension) image_filenames[i].append(file_path) # TODO(0.6.3): Combine this resize with the ones below. 
if aspect_ratio is not None: frame_im = cv2.resize( frame_im, (0, 0), fx=aspect_ratio, fy=1.0, interpolation=interpolation.value) frame_height = frame_im.shape[0] frame_width = frame_im.shape[1] # Figure out what kind of resizing needs to be done if height or width: if height and not width: factor = height / float(frame_height) width = int(factor * frame_width) if width and not height: factor = width / float(frame_width) height = int(factor * frame_height) assert height > 0 and width > 0 frame_im = cv2.resize( frame_im, (width, height), interpolation=interpolation.value) elif scale: frame_im = cv2.resize( frame_im, (0, 0), fx=scale, fy=scale, interpolation=interpolation.value)
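The cropped_code above ends inside save_images' resize branch; as a standalone sketch (the helper name and defaults are mine, not from the repository), the same decision logic reads: height-only or width-only preserves aspect ratio, both together force an exact size, and scale applies a uniform factor.

import cv2

def resize_like_save_images(frame_im, height=None, width=None, scale=None,
                            interpolation=cv2.INTER_CUBIC):
    frame_height, frame_width = frame_im.shape[0], frame_im.shape[1]
    if height or width:
        if height and not width:
            # Derive width from the requested height, preserving aspect ratio.
            width = int((height / float(frame_height)) * frame_width)
        if width and not height:
            # Derive height from the requested width, preserving aspect ratio.
            height = int((width / float(frame_width)) * frame_height)
        return cv2.resize(frame_im, (width, height), interpolation=interpolation)
    if scale:
        # Uniform rescale; ignored above whenever height or width is given.
        return cv2.resize(frame_im, (0, 0), fx=scale, fy=scale,
                          interpolation=interpolation)
    return frame_im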
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). 
encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress: progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True) filename_template = Template(image_name_template) scene_num_format = '%0' scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd' image_num_format = '%0' image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd' framerate = scene_list[0][0].framerate # TODO(v1.0): Split up into multiple sub-expressions so auto-formatter works correctly. 
timecode_list = [ [ FrameTimecode(int(f), fps=framerate) for f in [ # middle frames a[len(a) // 2] if (0 < j < num_images - 1) or num_images == 1 # first frame else min(a[0] + frame_margin, a[-1]) if j == 0 # last frame else max(a[-1] - frame_margin, a[0]) # for each evenly-split array of frames in the scene list for j, a in enumerate(np.array_split(r, num_images)) ] ] for i, r in enumerate([ # pad ranges to number of images r if 1 + r[-1] - r[0] >= num_images else list(r) + [r[-1]] * (num_images - len(r)) # create range of frames in scene for r in ( range( start.get_frames(), start.get_frames() + max( 1, # guard against zero length scenes end.get_frames() - start.get_frames())) # for each scene in scene list for start, end in scene_list) ]) ] image_filenames = {i: [] for i in range(len(timecode_list))} aspect_ratio = video.aspect_ratio if abs(aspect_ratio - 1.0) < 0.01: aspect_ratio = None logger.debug('Writing images with template %s', filename_template.template) for i, scene_timecodes in enumerate(timecode_list): for j, image_timecode in enumerate(scene_timecodes): video.seek(image_timecode) frame_im = video.read() if frame_im is not None: # TODO: Allow NUM to be a valid suffix in addition to NUMBER. file_path = '%s.%s' % (filename_template.safe_substitute( VIDEO_NAME=video.name, SCENE_NUMBER=scene_num_format % (i + 1), IMAGE_NUMBER=image_num_format % (j + 1), FRAME_NUMBER=image_timecode.get_frames()), image_extension) image_filenames[i].append(file_path) # TODO(0.6.3): Combine this resize with the ones below. if aspect_ratio is not None: frame_im = cv2.resize( frame_im, (0, 0), fx=aspect_ratio, fy=1.0, interpolation=interpolation.value) frame_height = frame_im.shape[0] frame_width = frame_im.shape[1] # Figure out what kind of resizing needs to be done if height or width: if height and not width: factor = height / float(frame_height) width = int(factor * frame_width) if width and not height: factor = width / float(frame_width) height = int(factor * frame_height) assert height > 0 and width > 0 frame_im = cv2.resize( frame_im, (width, height), interpolation=interpolation.value) elif scale: frame_im = cv2.resize( frame_im, (0, 0), fx=scale, fy=scale, interpolation=interpolation.value)
cv2.imwrite(get_and_create_path(file_path, output_dir), frame_im, imwrite_param)
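That gold next_line writes the (possibly resized) frame using the imwrite_param list built near the top of save_images; in plain OpenCV terms, and assuming get_cv2_imwrite_params() maps 'jpg' to cv2.IMWRITE_JPEG_QUALITY (that mapping lives in the platform module and is not shown in this record), the call is roughly equivalent to:

import cv2
import numpy as np

frame_im = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for a decoded frame
# encoder_param=95 for 'jpg' -> imwrite_param == [cv2.IMWRITE_JPEG_QUALITY, 95]
cv2.imwrite("video-Scene-001-01.jpg", frame_im, [cv2.IMWRITE_JPEG_QUALITY, 95])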
5
2023-10-25 02:50:01+00:00
24k
EulerSearch/embedding_studio
plugins/default_fine_tuning_method.py
[ { "identifier": "settings", "path": "embedding_studio/core/config.py", "snippet": "class Settings(BaseSettings):\n API_V1_STR: str = \"/api/v1\"\n SECRET_KEY: str = secrets.token_urlsafe(32)\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n FINETUNING_MONGO_HOST: str = os.getenv(\"FINETUNING_MONGO_HOST\", \"mongo\")\n FINETUNING_MONGO_PORT: int = os.getenv(\"FINETUNING_MONGO_PORT\", 27017)\n FINETUNING_MONGO_DB_NAME: str = os.getenv(\n \"FINETUNING_MONGO_DB_NAME\", \"embedding_studio\"\n )\n FINETUNING_MONGO_USERNAME: str = os.getenv(\n \"FINETUNING_MONGO_USERNAME\", \"root\"\n )\n FINETUNING_MONGO_PASSWORD: str = os.getenv(\n \"FINETUNING_MONGO_PASSWORD\", \"mongopassword\"\n )\n FINETUNING_MONGO_URL: str = (\n f\"mongodb://{FINETUNING_MONGO_USERNAME}:{FINETUNING_MONGO_PASSWORD}@\"\n f\"{FINETUNING_MONGO_HOST}:{FINETUNING_MONGO_PORT}\"\n )\n CLICKSTREAM_MONGO_HOST: str = os.getenv(\"CLICKSTREAM_MONGO_HOST\", \"mongo\")\n CLICKSTREAM_MONGO_PORT: int = os.getenv(\"CLICKSTREAM_MONGO_PORT\", 27017)\n CLICKSTREAM_MONGO_DB_NAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_DB_NAME\", \"embedding_studio\"\n )\n CLICKSTREAM_MONGO_USERNAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_USERNAME\", \"root\"\n )\n CLICKSTREAM_MONGO_PASSWORD: str = os.getenv(\n \"CLICKSTREAM_MONGO_PASSWORD\", \"mongopassword\"\n )\n CLICKSTREAM_MONGO_URL: str = (\n f\"mongodb://{CLICKSTREAM_MONGO_USERNAME}:{CLICKSTREAM_MONGO_PASSWORD}@\"\n f\"{CLICKSTREAM_MONGO_HOST}:{CLICKSTREAM_MONGO_PORT}\"\n )\n REDIS_HOST: str = os.getenv(\"REDIS_HOST\", \"localhost\")\n REDIS_PORT: int = os.getenv(\"REDIS_PORT\", 6379)\n REDIS_PASSWORD: str = os.getenv(\"REDIS_PASSWORD\", \"redispassword\")\n REDIS_URL: str = f\"redis://{REDIS_HOST}:{REDIS_PORT}/0\"\n MINIO_HOST: str = os.getenv(\"MINIO_HOST\", \"localhost\")\n MINIO_PORT: int = os.getenv(\"MINIO_PORT\", 9000)\n MINIO_ROOT_USER: str = os.getenv(\"MINIO_ROOT_USER\", \"root\")\n MINIO_ROOT_PASSWORD: str = os.getenv(\n \"MINIO_ROOT_PASSWORD\", \"miniopassword\"\n )\n MINIO_DEFAULT_BUCKETS: str = os.getenv(\n \"MINIO_DEFAULT_BUCKETS\", \"embeddingstudio\"\n )\n MINIO_ACCESS_KEY: str = os.getenv(\n \"MINIO_ACCESS_KEY\", \"mtGNiEvoTL6C0EXAMPLE\"\n )\n MINIO_SECRET_KEY: str = os.getenv(\n \"MINIO_SECRET_KEY\", \"HY5JserXAaWmphNyCpQPEXAMPLEKEYEXAMPLEKEY\"\n )\n MYSQL_HOST: str = os.getenv(\"MYSQL_HOST\", \"localhost\")\n MYSQL_PORT: int = os.getenv(\"MYSQL_PORT\", 3306)\n MYSQL_DATABASE: str = os.getenv(\"MYSQL_DATABASE\", \"mlflow\")\n MYSQL_USER: str = os.getenv(\"MYSQL_USER\", \"mlflow_user\")\n MYSQL_PASSWORD: str = os.getenv(\"MYSQL_PASSWORD\", \"Baxp3O5rUvpIxiD77BfZ\")\n MYSQL_ROOT_PASSWORD: str = os.getenv(\n \"MYSQL_ROOT_PASSWORD\", \"PrK5qmPTDsm2IYKvHVG8\"\n )\n MLFLOW_HOST: str = os.getenv(\"MLFLOW_HOST\", \"localhost\")\n MLFLOW_PORT: int = os.getenv(\"MLFLOW_PORT\", 5001)\n MLFLOW_TRACKING_URI: str = f\"http://{MLFLOW_HOST}:{MLFLOW_PORT}\"\n ES_PLUGINS_PATH: str = os.getenv(\"ES_PLUGINS_PATH\", \"plugins\")\n FINE_TUNING_WORKER_MAX_RETRIES: int = os.getenv(\n \"FINE_TUNING_WORKER_MAX_RETRIES\", 3\n )\n FINE_TUNING_WORKER_TIME_LIMIT: int = os.getenv(\n \"FINE_TUNING_WORKER_TIME_LIMIT\", 18000000\n )\n DEFAULT_MAX_ATTEMPTS: int = os.getenv(\"DEFAULT_MAX_ATTEMPTS\", 3)\n DEFAULT_WAIT_TIME_SECONDS: float = os.getenv(\n \"DEFAULT_WAIT_TIME_SECONDS\", 3.0\n )\n S3_READ_CREDENTIALS_ATTEMPTS: int = os.getenv(\n \"S3_READ_CREDENTIALS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_READ_WAIT_TIME_SECONDS: float = os.getenv(\n 
\"S3_READ_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n S3_DOWNLOAD_DATA_ATTEMPTS: int = os.getenv(\n \"S3_DOWNLOAD_DATA_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS: float = os.getenv(\n \"S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_METRIC_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_METRIC_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_PARAM_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_PARAM_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOAD_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOAD_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_DELETE_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_RUNS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_RUNS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_END_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_END_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_END_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_END_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS\",\n DEFAULT_WAIT_TIME_SECONDS,\n )\n MLFLOW_DELETE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_CREATE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC\", 12 * 60 * 60\n )\n CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC\", 5 * 60\n )" }, { "identifier": "FineTuningMethod", "path": "embedding_studio/core/plugin.py", "snippet": "class 
FineTuningMethod(ABC):\n \"\"\"Base class (plugin) for fine-tuning methods.\n\n All fine-tuning methods must inherit from this class.\n \"\"\"\n\n meta: PluginMeta\n\n @abstractmethod\n def upload_initial_model(self) -> None:\n \"\"\"Upload the initial model to the storage.\n\n Method that should be implemented by subclasses to upload the\n initial model to the storage.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses must implement upload_initial_model\"\n )\n\n @abstractmethod\n def get_fine_tuning_builder(\n self, clickstream: List[SessionWithEvents]\n ) -> FineTuningBuilder:\n \"\"\"Return a FineTuningBuilder instance for the fine-tuning process.\n\n Method that should be implemented by subclasses to provide a\n FineTuningBuilder instance.\n\n :param clickstream: Collection of user feedback, used to enhance\n the model.\n :return: An instance of FineTuningBuilder used for\n launching the fine-tuning process.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses must implement get_fine_tuning_builder\"\n )" }, { "identifier": "AWSS3ClickstreamParser", "path": "embedding_studio/embeddings/data/clickstream/parsers/s3_parser.py", "snippet": "class AWSS3ClickstreamParser(ClickstreamParser):\n def __init__(\n self, query_item_type: type, search_result_type: type, event_type: type\n ):\n super(AWSS3ClickstreamParser, self).__init__(\n query_item_type, search_result_type, S3FileMeta, event_type\n )" }, { "identifier": "DummyEventType", "path": "embedding_studio/embeddings/data/clickstream/search_event.py", "snippet": "class DummyEventType(EventType):\n importance: float\n\n @property\n def event_importance(self) -> float:\n return self.importance" }, { "identifier": "SearchResult", "path": "embedding_studio/embeddings/data/clickstream/search_event.py", "snippet": "class SearchResult(BaseModel):\n item: ItemMeta\n is_click: bool\n rank: Optional[float] = None\n event_type: Optional[EventType] = None\n timestamp: Optional[int] = None\n\n @validator(\"event_type\", pre=True, always=True)\n def validate_event_type(cls, value, values):\n if value is not None and not isinstance(value, EventType):\n raise ValueError(\"Invalid event_type instance\")\n return value\n\n class Config:\n arbitrary_types_allowed = True\n\n @classmethod\n def from_mongo(\n cls,\n result: SearchResultItem,\n event_ids: Set[str],\n item_type: type,\n event_type: type,\n ) -> \"SearchResult\":\n event_instance = DummyEventType(importance=1)\n\n return cls(\n item=item_type(**result.meta),\n is_click=result.object_id in event_ids,\n event_type=event_instance,\n timestamp=None,\n )\n\n @classmethod\n def from_dict(\n cls, data: dict, item_type: type, event_type: type\n ) -> \"SearchResult\":\n event_data: Optional[Dict] = data.get(\"event_type\")\n event_instance = None\n\n if event_data is not None:\n event_instance = event_type(**event_data)\n\n return cls(\n item=item_type(**data[\"item\"]),\n is_click=data[\"is_click\"],\n rank=data[\"rank\"],\n event_type=event_instance,\n timestamp=int(data.get(\"timestamp\")),\n )" }, { "identifier": "ClickstreamSessionsSplitter", "path": "embedding_studio/embeddings/data/clickstream/splitter.py", "snippet": "class ClickstreamSessionsSplitter:\n def __init__(\n self,\n test_size_ratio: float = 0.2,\n shuffle: bool = True,\n random_state: Optional[int] = None,\n ):\n \"\"\"Generate train / test clickstream sessions split.\n\n :param test_size_ratio: ratio of test split size (default: 0.2)\n :param shuffle: to shuffle or not paired clickstream sessions (default: True)\n :param 
random_state: random state to sklearn splitter (default: None)\n \"\"\"\n if (\n not isinstance(test_size_ratio, float)\n or test_size_ratio <= 0\n or test_size_ratio >= 1.0\n ):\n raise ValueError(\n f\"test_size_ration is a numeric value in range (0.0, 1.0)\"\n )\n\n if test_size_ratio >= 0.5:\n logger.warning(\n \"test_size_ration is larger than 0.5. It's unusual for ML to have test size > train size.\"\n )\n\n self._test_size_ratio = test_size_ratio\n\n if not isinstance(shuffle, bool):\n raise ValueError(\"shuffle should be boolean\")\n self._shuffle = shuffle\n self._random_state = random_state\n\n @property\n def shuffle(self) -> bool:\n return self._shuffle\n\n def split(self, sessions: List[ClickstreamSession]) -> DatasetDict:\n \"\"\"Split clickstream sessions.\n\n :param sessions: sessions to be split\n :return: train / test splits accordingly (PairedClickstreamDataset)\n \"\"\"\n # Get all IDs\n all_result_ids: Set[str] = set()\n for session in sessions:\n all_result_ids.update(session.results)\n\n if len(all_result_ids) == 0:\n raise ValueError(\"Sessions list is empty\")\n\n # Ensure a minimum number of unique result IDs in each set\n min_unique_test_sessions: int = int(\n self._test_size_ratio * len(sessions)\n )\n\n # Split the result IDs into train and test sets\n train_result_ids, test_result_ids = train_test_split(\n list(all_result_ids),\n test_size=self._test_size_ratio,\n random_state=self._random_state,\n )\n test_result_ids: Set[str] = set(test_result_ids)\n\n # Split sessions into train and test based on result IDs\n train_sessions: List[ClickstreamSession] = []\n test_sessions: List[ClickstreamSession] = []\n\n for session in sessions:\n if len(session.results) == 0:\n continue\n\n if (\n len(set(session.results) & test_result_ids)\n / len(session.results)\n <= 0.5\n ):\n # If less than 50% of result IDs intersect with the test set, add to the train set\n train_sessions.append(session)\n else:\n test_sessions.append(session)\n\n if len(test_sessions) < min_unique_test_sessions:\n logger.warning(\n f\"Clickstream sessions intersects highly, so they are not split well\"\n )\n random_train_session_indexess: List[int] = random.choices(\n list(range(len(train_sessions))),\n k=min_unique_test_sessions - len(test_sessions),\n )\n for i in reversed(sorted(random_train_session_indexess)):\n test_sessions.append(train_sessions.pop(i))\n\n if len(test_sessions) + len(train_sessions) < len(sessions):\n missed_sessions_count = len(sessions) - (\n len(test_sessions) + len(train_sessions)\n )\n logger.warning(\n f\"Clickstream sessions weren't split correctly, add {missed_sessions_count} more sessions to the train split.\"\n )\n\n for session in sessions:\n if (\n session not in train_sessions\n and session not in test_sessions\n ):\n train_sessions.append(session)\n\n return DatasetDict(\n {\n \"train\": PairedClickstreamDataset(\n train_sessions, self.shuffle\n ),\n \"test\": PairedClickstreamDataset(test_sessions, self.shuffle),\n }\n )" }, { "identifier": "TextQueryItem", "path": "embedding_studio/embeddings/data/clickstream/text_query_item.py", "snippet": "class TextQueryItem(QueryItem):\n text: str\n\n class Config:\n arbitrary_types_allowed = True" }, { "identifier": "TextQueryRetriever", "path": "embedding_studio/embeddings/data/clickstream/text_query_retriever.py", "snippet": "class TextQueryRetriever(QueryRetriever):\n def __call__(self, query: TextQueryItem) -> str:\n if not hasattr(query, \"text\"):\n raise ValueError(f\"Query object does not have text attribute\")\n 
return query.text" }, { "identifier": "AWSS3DataLoader", "path": "embedding_studio/embeddings/data/loaders/s3/s3_loader.py", "snippet": "class AWSS3DataLoader(DataLoader):\n def __init__(self, retry_config: Optional[RetryConfig] = None, **kwargs):\n \"\"\"Items loader from AWS S3.\n\n :param max_attempts: maximum number of attempts (default: 10)\n :param wait_time_seconds: time to wait between (default: 10)\n :param kwargs: dict data for AWSS3Credentials\n \"\"\"\n super(AWSS3DataLoader, self).__init__(**kwargs)\n self.retry_config = (\n retry_config\n if retry_config\n else AWSS3DataLoader._get_default_retry_config()\n )\n self.credentials = AWSS3Credentials(**kwargs)\n self.attempt_exception_types = [EndpointConnectionError]\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"credentials\"] = RetryParams(\n max_attempts=settings.S3_READ_CREDENTIALS_ATTEMPTS,\n wait_time_seconds=settings.S3_READ_WAIT_TIME_SECONDS,\n )\n config[\"download_data\"] = RetryParams(\n max_attempts=settings.S3_DOWNLOAD_DATA_ATTEMPTS,\n wait_time_seconds=settings.S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS,\n )\n return config\n\n @retry_method(name=\"download_data\")\n def _read_from_s3(self, client, bucket: str, file: str) -> Image:\n return read_from_s3(client, bucket, file)\n\n @retry_method(name=\"credentials\")\n def _get_client(self, task_id: str):\n if (\n self.credentials.aws_access_key_id is None\n or self.credentials.aws_secret_access_key is None\n ) and not self.credentials.use_system_info:\n logger.warning(\n \"No specific AWS credentials, use Anonymous session\"\n )\n s3_client = boto3.client(\n \"s3\", config=Config(signature_version=UNSIGNED)\n )\n else:\n sts_client = boto3.client(\n \"sts\",\n aws_access_key_id=self.credentials.aws_access_key_id,\n aws_secret_access_key=self.credentials.aws_secret_access_key,\n )\n if self.credentials.external_id:\n assumed_role_object = sts_client.assume_role(\n RoleArn=self.credentials.role_arn,\n RoleSessionName=task_id,\n ExternalId=self.credentials.external_id,\n )\n else:\n assumed_role_object = sts_client.assume_role(\n RoleArn=self.credentials.role_arn,\n RoleSessionName=task_id,\n )\n credentials = assumed_role_object[\"Credentials\"]\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=credentials[\"AccessKeyId\"],\n aws_secret_access_key=credentials[\"SecretAccessKey\"],\n aws_session_token=credentials[\"SessionToken\"],\n )\n return s3_client\n\n def _generate_dataset_from_s3(\n self, files: List[S3FileMeta]\n ) -> Iterable[Dict]:\n if len(files) == 0:\n logger.warning(\"Nothing to download\")\n else:\n logger.info(\"Connecting to aws s3...\")\n task_id: str = str(uuid.uuid4())\n try:\n s3_client = self._get_client(task_id)\n logger.info(\"Start downloading data from S3...\")\n bad_items_count = 0\n for val in files:\n image = None\n try:\n image: Image = read_from_s3(\n s3_client, val.bucket, val.file\n )\n except Exception as e:\n logger.exception(\n f\"Unable to download an item: {val.bucket}/{val.file} Exception: {str(e)}\"\n )\n\n if image is None:\n logger.error(\n f\"Unable to download {val.file} from {val.bucket}\"\n )\n bad_items_count += 1\n continue\n yield {\"item\": image, \"item_id\": val.id}\n\n if bad_items_count == len(files):\n raise FailedToLoadAnythingFromAWSS3()\n\n except Exception as err:\n 
logger.error(f\"Failed to load dataset from s3: {err}\")\n raise err\n\n def load(self, items_data: List[S3FileMeta]) -> Dataset:\n return Dataset.from_generator(\n lambda: self._generate_dataset_from_s3(items_data)\n )" }, { "identifier": "CLIPItemStorageProducer", "path": "embedding_studio/embeddings/data/storages/producers/clip.py", "snippet": "class CLIPItemStorageProducer(ItemStorageProducer):\n def __init__(\n self,\n field_normalizer: DatasetFieldsNormalizer,\n id_field_name: Optional[str] = None,\n ):\n super(CLIPItemStorageProducer, self).__init__(\n ImageItemsDatasetDictPreprocessor(field_normalizer, 224),\n id_field_name,\n )" }, { "identifier": "DatasetFieldsNormalizer", "path": "embedding_studio/embeddings/data/utils/fields_normalizer.py", "snippet": "class DatasetFieldsNormalizer:\n ID_FIELD_NAME = \"item_id\"\n ITEM_FIELD_NAME = \"item\"\n\n def __init__(self, item_field_name: str, id_field_name: str):\n \"\"\"Unify column names in DatasetDict, so it can be used in fine-tuning script.\n A dataset should have ID column, related to ID in clickstream.\n\n :param item_field_name: name of column with items.\n :param id_field_name: name of ID column\n \"\"\"\n if not id_field_name:\n raise ValueError(\"id_field_name should be non-empty string\")\n self.id_field_name = id_field_name\n\n if not item_field_name:\n raise ValueError(\"item_field_name should be non-empty string\")\n self.item_field_name = item_field_name\n\n def __call__(self, dataset: DatasetDict) -> DatasetDict:\n id_normalizer = (\n lambda id_value: str(id_value.item())\n if (\n isinstance(id_value, Tensor)\n or isinstance(id_value, FloatTensor)\n )\n else str(id_value)\n )\n for key in dataset.keys():\n if (\n DatasetFieldsNormalizer.ID_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.id_field_name, DatasetFieldsNormalizer.ID_FIELD_NAME\n )\n else:\n logger.warning(\n f\"Dataset {key} split already has {DatasetFieldsNormalizer.ID_FIELD_NAME} field\"\n )\n\n if (\n DatasetFieldsNormalizer.ITEM_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.item_field_name,\n DatasetFieldsNormalizer.ITEM_FIELD_NAME,\n )\n else:\n logger.warning(\n f\"Dataset {key} split already has {DatasetFieldsNormalizer.ITEM_FIELD_NAME} field\"\n )\n\n return dataset.map(\n lambda example: {\n DatasetFieldsNormalizer.ID_FIELD_NAME: id_normalizer(\n example[DatasetFieldsNormalizer.ID_FIELD_NAME]\n )\n }\n )" }, { "identifier": "CosineProbMarginRankingLoss", "path": "embedding_studio/embeddings/losses/prob_cosine_margin_ranking_loss.py", "snippet": "class CosineProbMarginRankingLoss(ProbMarginRankingLoss):\n def __init__(self, base_margin: Optional[float] = 1.0):\n \"\"\"Embeddings Fine-tuning Loss (modification of MarginRankingLoss)\n Use sigmoid instead of ReLU + results confidences to ignore noises and mistakes.\n Adapt to cosine similarity / distance\n\n :param base_margin: margin ranking loss margin (default: 1.0)\n \"\"\"\n super(CosineProbMarginRankingLoss, self).__init__(\n base_margin=base_margin\n )\n\n def __adjust(self, adjusted_diff: FloatTensor) -> FloatTensor:\n # The way any wrong difference more than 0.01 is worth to be penaltized\n # Sigmoid with this kind of input return prob > 0.1, for difference between\n # pos and more than 0.001. 
That's our expected behaviour.\n # TODO: implement calculation of magic numbers\n return -400 * adjusted_diff + 6" }, { "identifier": "TextToImageCLIPModel", "path": "embedding_studio/embeddings/models/text_to_image/clip.py", "snippet": "class TextToImageCLIPModel(EmbeddingsModelInterface):\n def __init__(self, clip_model: SentenceTransformer):\n \"\"\"Wrapper to SentenceTransformer CLIP model.\n Usage: model = TextToImageCLIPModel(SentenceTransformer('clip-ViT-B-32'))\n\n :param clip_model: clip model from SentenceTransformer package\n \"\"\"\n super(TextToImageCLIPModel, self).__init__(same_query_and_items=False)\n self.clip_model = clip_model\n self.text_model = torch.nn.Sequential(\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"text_model\"],\n PassPoolerOutputLayer(),\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"text_projection\"],\n )\n\n self.vision_model = torch.nn.Sequential(\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"vision_model\"],\n PassPoolerOutputLayer(),\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"visual_projection\"],\n )\n\n def get_query_model_params(self) -> Iterator[Parameter]:\n return self.text_model.parameters()\n\n def get_items_model_params(self) -> Iterator[Parameter]:\n return self.vision_model.parameters()\n\n def fix_query_model(self, num_fixed_layers: int):\n if (\n len(self.text_model._modules[\"0\"].encoder.layers)\n <= num_fixed_layers\n ):\n raise ValueError(\n f\"Number of fixed layers ({num_fixed_layers}) >= number \"\n f'of existing layers ({len(self.text_model._modules[\"0\"].encoder.layers)})'\n )\n\n self.text_model._modules[\"0\"].embeddings.requires_grad = False\n for i, attn in enumerate(self.text_model._modules[\"0\"].encoder.layers):\n if i < num_fixed_layers:\n self.text_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = False\n\n def unfix_query_model(self):\n self.text_model._modules[\"0\"].embeddings.requires_grad = True\n for i, attn in enumerate(self.text_model._modules[\"0\"].encoder.layers):\n self.text_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = True\n\n def fix_item_model(self, num_fixed_layers: int):\n if (\n len(self.vision_model._modules[\"0\"].encoder.layers)\n <= num_fixed_layers\n ):\n raise ValueError(\n f\"Number of fixed layers ({num_fixed_layers}) >= number \"\n f'of existing layers ({len(self.vision_model._modules[\"0\"].encoder.layers)})'\n )\n\n self.vision_model._modules[\"0\"].embeddings.requires_grad = False\n for i, attn in enumerate(\n self.vision_model._modules[\"0\"].encoder.layers\n ):\n if i < num_fixed_layers:\n self.vision_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = False\n\n def unfix_item_model(self):\n self.vision_model._modules[\"0\"].embeddings.requires_grad = True\n for i, attn in enumerate(\n self.vision_model._modules[\"0\"].encoder.layers\n ):\n self.vision_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = True\n\n def tokenize(self, query: str) -> List[Dict]:\n return self.clip_model.tokenize([query])\n\n def forward_query(self, query: str) -> FloatTensor:\n if len(query) == 0:\n logger.warning(\"Provided query is empty\")\n\n tokenized = self.tokenize(query)\n return self.text_model.forward(tokenized[\"input_ids\"].to(self.device))\n\n def forward_items(self, items: List[np.array]) -> FloatTensor:\n if len(items) == 0:\n raise ValueError(\"items list must not be empty\")\n\n return 
self.vision_model.forward(torch.stack(items).to(self.device))" }, { "identifier": "SessionWithEvents", "path": "embedding_studio/models/clickstream/sessions.py", "snippet": "class SessionWithEvents(RegisteredSession):\n events: List[SessionEvent]" }, { "identifier": "FineTuningBuilder", "path": "embedding_studio/models/plugin.py", "snippet": "class FineTuningBuilder:\n data_loader: DataLoader\n query_retriever: QueryRetriever\n clickstream_parser: ClickstreamParser\n clickstream_sessions_splitter: ClickstreamSessionsSplitter\n dataset_fields_normalizer: DatasetFieldsNormalizer\n item_storage_producer: ItemStorageProducer\n accumulators: List[MetricsAccumulator]\n experiments_manager: ExperimentsManager\n fine_tuning_settings: FineTuningSettings\n initial_params: Dict[str, List[Any]]\n ranking_data: RankingData\n initial_max_evals: int = 100" }, { "identifier": "PluginMeta", "path": "embedding_studio/models/plugin.py", "snippet": "class PluginMeta(BaseModel):\n name: str\n version: str = \"1.0.0\"\n description: Optional[str] = None" }, { "identifier": "prepare_data", "path": "embedding_studio/workers/fine_tuning/data/prepare_data.py", "snippet": "def prepare_data(\n clickstream_sessions: List[Union[Dict, SessionWithEvents]],\n parser: ClickstreamParser,\n clickstream_splitter: ClickstreamSessionsSplitter,\n query_retriever: QueryRetriever,\n loader: DataLoader,\n storage_producer: ItemStorageProducer,\n) -> RankingData:\n \"\"\"Prepare fine-tuning data.\n\n :param clickstream_sessions: clickstream sessions\n :param parser: how to parse a clickstream session\n :param clickstream_splitter: how to split clickstream sessions\n :param query_retriever: retrieve query item\n :param loader: load items data\n :param storage_producer: get train/test datasets\n :return: train / test clickstream sessiobs and dataset dict\n \"\"\"\n if len(clickstream_sessions) == 0:\n raise ValueError(\"Empty clickstream sessions list\")\n\n logger.info(\"Parse clickstream sessions data\")\n raw_clickstream_sessions: List[RawClickstreamSession] = [\n (\n parser.parse(session)\n if isinstance(session, dict)\n else parser.parse_from_mongo(session)\n )\n for session in clickstream_sessions\n ]\n\n clickstream_sessions: List[ClickstreamSession] = [\n r.get_session() for r in raw_clickstream_sessions\n ]\n\n logger.info(\"Setup query retriever\")\n query_retriever.setup(clickstream_sessions)\n\n logger.info(\"Split clickstream sessions into train / test\")\n clickstream_dataset = clickstream_splitter.split(clickstream_sessions)\n logger.info(\n f'Splitting is finished, train: {len(clickstream_dataset[\"train\"])} / test: {len(clickstream_dataset[\"test\"])}'\n )\n\n logger.info(\"Get list of files to be loaded\")\n files_to_load: Set[ItemMeta] = set()\n for session in raw_clickstream_sessions:\n files_to_load.update(set([r.item for r in session.results]))\n\n if len(files_to_load) == 0:\n raise ValueError(\"Empty clickstream sessions\")\n\n logger.info(\"Download files and prepare DataDict of ItemStorage values\")\n files_to_load: List[ItemMeta] = list(files_to_load)\n\n dataset: DatasetDict = storage_producer(\n loader.load(files_to_load), clickstream_dataset\n )\n\n return RankingData(clickstream_dataset, dataset)" }, { "identifier": "ExperimentsManager", "path": "embedding_studio/workers/fine_tuning/experiments/experiments_tracker.py", "snippet": "class ExperimentsManager:\n def __init__(\n self,\n tracking_uri: str,\n main_metric: str,\n accumulators: List[MetricsAccumulator],\n is_loss: bool = False,\n n_top_runs: int 
= 10,\n requirements: Optional[str] = None,\n retry_config: Optional[RetryConfig] = None,\n ):\n \"\"\"Wrapper over mlflow package to manage certain fine-tuning experiments.\n\n :param tracking_uri: url of MLFlow server\n :param main_metric: name of main metric that will be used to find best model\n :param accumulators: accumulators of metrics to be logged\n :param is_loss: is main metric loss (if True, then best quality is minimal) (default: False)\n :param n_top_runs: how many hyper params group consider to be used in following tuning steps (default: 10)\n :param requirements: extra requirements to be passed to mlflow.pytorch.log_model (default: None)\n :param retry_config: retry policy (default: None)\n \"\"\"\n if not isinstance(tracking_uri, str) or len(tracking_uri) == 0:\n raise ValueError(\n f\"MLFlow tracking URI value should be a not empty string\"\n )\n mlflow.set_tracking_uri(tracking_uri)\n self._tracking_uri = tracking_uri\n if self._tracking_uri.endswith(\"/\"):\n self._tracking_uri = self._tracking_uri[:-1]\n\n self.retry_config = (\n retry_config\n if retry_config\n else ExperimentsManager._get_default_retry_config()\n )\n self.attempt_exception_types = [RestException]\n\n if not isinstance(main_metric, str) or len(main_metric) == 0:\n raise ValueError(f\"main_metric value should be a not empty string\")\n self.main_metric = main_metric\n self._metric_field = f\"metrics.{self.main_metric}\"\n\n self._n_top_runs = n_top_runs\n self._is_loss = is_loss\n\n if len(accumulators) == 0:\n logger.warning(\n \"No accumulators were provided, there will be no metrics logged except loss\"\n )\n self._accumulators = accumulators\n\n self._requirements: List[str] = (\n _get_base_requirements() if requirements is None else requirements\n )\n\n self._iteration_experiment = None\n self._tuning_iteration = None\n self._tuning_iteration_id = None\n\n self._run = None\n self._run_params = None\n self._run_id = None\n\n def _check_artifact_exists(self, run_id, artifact_path):\n client = mlflow.MlflowClient()\n artifacts = client.list_artifacts(run_id, path=artifact_path)\n return any(artifact.path == artifact_path for artifact in artifacts)\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"log_metric\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_METRIC_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS,\n )\n config[\"log_param\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_PARAM_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS,\n )\n config[\"log_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"load_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOAD_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"delete_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"search_runs\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_RUNS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS,\n )\n config[\"end_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_END_RUN_ATTEMPTS,\n 
wait_time_seconds=settings.MLFLOW_END_RUN_WAIT_TIME_SECONDS,\n )\n config[\"get_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_RUN_WAIT_TIME_SECONDS,\n )\n config[\"search_experiments\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS,\n )\n config[\"delete_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"create_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_CREATE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"get_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n\n return config\n\n @property\n def is_loss(self) -> bool:\n return self._is_loss\n\n def __del__(self):\n self.finish_run()\n self.finish_iteration()\n\n def is_retryable_error(self, e: Exception) -> bool:\n return False\n\n def _get_model_exists_filter(self) -> str:\n return \"metrics.model_uploaded = 1\"\n\n def _get_artifact_url(self, run_id: str, artifact_path: str) -> str:\n return (\n f\"{self._tracking_uri}/get-artifact?path=\"\n f'{urllib.parse.quote(artifact_path, safe=\"\")}&run_uuid={run_id}'\n )\n\n @retry_method(name=\"log_model\")\n def upload_initial_model(self, model: EmbeddingsModelInterface):\n \"\"\"Upload the very first, initial model to the mlflow server\n\n :param model: model to be uploaded\n \"\"\"\n self.finish_iteration()\n experiment_id = get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME)\n if experiment_id is None:\n logger.info(\n f\"Can't find any active iteration with name: {INITIAL_EXPERIMENT_NAME}\"\n )\n try:\n logger.info(\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n except MlflowException as e:\n if \"Cannot set a deleted experiment\" in str(e):\n logger.error(\n f\"Creation of initial experiment is failed: experiment with the same name {INITIAL_EXPERIMENT_NAME} is deleted, but not archived\"\n )\n experiments = mlflow.search_experiments(\n view_type=mlflow.entities.ViewType.ALL\n )\n deleted_experiment_id = None\n\n for exp in experiments:\n if exp.name == INITIAL_EXPERIMENT_NAME:\n deleted_experiment_id = exp.experiment_id\n break\n\n logger.info(\n f\"Restore deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().restore_experiment(\n deleted_experiment_id\n )\n logger.info(\n f\"Archive deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n deleted_experiment_id,\n INITIAL_EXPERIMENT_NAME + \"_archive\",\n )\n logger.info(\n f\"Delete archived experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.delete_experiment(deleted_experiment_id)\n logger.info(f\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n else:\n raise e\n\n with mlflow.start_run(\n experiment_id=get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n run_name=INITIAL_RUN_NAME,\n ) as run:\n logger.info(\n f\"Upload initial model to {INITIAL_EXPERIMENT_NAME} / {INITIAL_RUN_NAME}\"\n )\n if self._check_artifact_exists(\n get_run_id_by_name(\n get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n INITIAL_RUN_NAME,\n ),\n \"model\",\n ):\n 
logger.info(\"Model is already uploaded\")\n return\n\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n logger.info(\"Uploading is finished\")\n\n @retry_method(name=\"load_model\")\n def download_initial_model(self) -> EmbeddingsModelInterface:\n \"\"\"Download initial model.\n\n :return: initial embeddings model\n \"\"\"\n model_uri: str = f\"runs:/{get_run_id_by_name(get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME), INITIAL_RUN_NAME)}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_runs\")\n def get_top_params(self) -> Optional[List[FineTuningParams]]:\n \"\"\"Get top N previous fine-tuning iteration best params\n\n :return: fine-tuning iteration params\n \"\"\"\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id:\n logger.warning(\n \"Can't retrieve top params, no previous iteration in history\"\n )\n return None\n\n else:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[last_session_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and only finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"Can't retrieve top params, no previous iteration's finished runs with uploaded model in history\"\n )\n return None\n\n # Get the indices that would sort the DataFrame based on the specified parameter\n sorted_indices: np.ndarray = np.argsort(\n runs[self._metric_field].values\n )\n if not self.is_loss:\n sorted_indices = sorted_indices[\n ::-1\n ] # Use [::-1] to sort in descending order\n\n # Extract the top N rows based on the sorted indices\n top_n_rows: np.ndarray = runs.iloc[\n sorted_indices[: self._n_top_runs]\n ]\n\n # Define a mapping dictionary to remove the \"params.\" prefix\n column_mapping: Dict[str, str] = {\n col: col.replace(\"params.\", \"\") for col in top_n_rows.columns\n }\n\n # Rename the columns\n top_n_rows: np.ndarray = top_n_rows.rename(\n columns=column_mapping\n ).to_dict(orient=\"records\")\n\n return [FineTuningParams(**row) for row in top_n_rows]\n\n def _get_best_previous_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id or last_session_id is None:\n return None, True\n else:\n run_id, _ = self._get_best_quality(last_session_id)\n return run_id, False\n\n def _get_best_current_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n if (\n initial_id == self._tuning_iteration_id\n or self._tuning_iteration_id is None\n ):\n return None, True\n else:\n run_id, _ = self._get_best_quality(self._tuning_iteration_id)\n return run_id, False\n\n @retry_method(name=\"load_model\")\n def get_last_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, no previous iteration in history\"\n )\n return None\n else:\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no previous iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return 
self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_current_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_current_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, current run is initial\"\n )\n return None\n\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_last_model(self) -> EmbeddingsModelInterface:\n \"\"\"Get previous iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Download initial model, no previous iteration in history\"\n )\n return self.download_initial_model()\n\n else:\n if run_id is None:\n logger.warning(\n \"Download initial model, no previous iteration's \"\n \"finished runs with uploaded model in history\"\n )\n return self.download_initial_model()\n else:\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"load_model\")\n def get_current_model(self) -> Optional[EmbeddingsModelInterface]:\n \"\"\"Get current iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n if self._tuning_iteration is None:\n logger.error(\"No current iteration, can't get any model\")\n return\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n logger.info(\"Download initial model\")\n return self.download_initial_model()\n\n run_id, is_initial = self._get_best_current_run_id()\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_experiments\")\n def get_previous_iteration_id(self) -> Optional[str]:\n if (\n self._tuning_iteration == INITIAL_EXPERIMENT_NAME\n or self._tuning_iteration is None\n ):\n logger.warning(\n f\"Can't find previous iteration - no current iteration was setup\"\n )\n return None\n\n plugin_name = f\"{self._tuning_iteration.plugin_name}\"\n experiments: List[Experiment] = [\n e\n for e in mlflow.search_experiments()\n if (\n e.name.startswith(EXPERIMENT_PREFIX)\n and e.name.find(plugin_name) != -1\n and e.name != str(self._tuning_iteration)\n )\n ]\n if len(experiments) == 0:\n logger.warning(\"No iteration found\")\n return None\n else:\n return max(\n experiments, key=lambda exp: exp.creation_time\n ).experiment_id\n\n @retry_method(name=\"delete_experiment\")\n def delete_previous_iteration(self):\n experiment_id: Optional[str] = self.get_previous_iteration_id()\n\n logger.info(\"Delete models of previous iteration.\")\n runs = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"]\n run_ids = runs[\"run_id\"].tolist()\n\n for run_id in run_ids:\n self.delete_model(run_id, experiment_id)\n\n if experiment_id is not None:\n logger.info(\n f\"Iteration with ID {experiment_id} is going to be deleted\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n experiment_id, INITIAL_EXPERIMENT_NAME + \"_archive\"\n )\n mlflow.delete_experiment(experiment_id)\n 
else:\n logger.warning(\n \"Can't delete a previous iteration, no previous iteration in history\"\n )\n\n @retry_method(name=\"create_experiment\")\n def set_iteration(self, iteration: FineTuningIteration):\n \"\"\"Start a new fine-tuning session.\n\n :param iteration: fine-tuning iteration info\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n self.finish_iteration()\n\n logger.info(\"Start a new fine-tuning iterations\")\n\n self._tuning_iteration = iteration\n self._tuning_iteration_id = get_experiment_id_by_name(str(iteration))\n if self._tuning_iteration_id is None:\n self._tuning_iteration_id = mlflow.create_experiment(\n str(iteration)\n )\n\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n @retry_method(name=\"start_run\")\n def set_run(self, params: FineTuningParams) -> bool:\n \"\"\"Start a new run with provided fine-tuning params\n\n :param params: provided fine-tuning params\n :return: True if it's a finished run (otherwise False)\n \"\"\"\n convert_value = (\n lambda value: \", \".join(map(str, value))\n if isinstance(value, list)\n else value\n )\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n # TODO: implement exception\n raise ValueError(\"You can't start run for initial iteration\")\n\n if self._run is not None:\n self.finish_run()\n\n logger.info(\n f\"Start a new run for iteration {self._tuning_iteration_id} with params:\\n\\t{str(params)}\"\n )\n\n self._run_params = params\n run_name: str = self._run_params.id\n self._run_id = get_run_id_by_name(self._tuning_iteration_id, run_name)\n\n self._run = mlflow.start_run(\n self._run_id, self._tuning_iteration_id, run_name\n )\n if self._run_id is None:\n self._run_id = self._run.info.run_id\n for key, value in dict(self._tuning_iteration).items():\n mlflow.log_param(key, convert_value(value))\n\n for key, value in dict(self._run_params).items():\n mlflow.log_param(key, convert_value(value))\n\n mlflow.log_metric(\"model_uploaded\", 0)\n\n return False\n else:\n return self._run.info.status == \"FINISHED\"\n\n @retry_method(name=\"search_runs\")\n def model_is_uploaded(self) -> bool:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs[\"run_id\"] == self._run_id]\n return runs.shape[0] > 0\n\n @retry_method(name=\"get_experiment\")\n def finish_iteration(self):\n logger.info(f\"Finish current iteration {self._tuning_iteration_id}\")\n self._tuning_iteration = INITIAL_EXPERIMENT_NAME\n self._tuning_iteration_id = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n\n if self._tuning_iteration_id is None:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_name=INITIAL_EXPERIMENT_NAME\n )\n self._tuning_iteration_id = (\n self._iteration_experiment.experiment_id\n )\n else:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n logger.info(f\"Current iteration is finished\")\n\n @retry_method(name=\"end_run\")\n def finish_run(self):\n logger.info(\n f\"Finish current run {self._tuning_iteration_id} / {self._run_id}\"\n )\n for accumulator in self._accumulators:\n accumulator.clear()\n\n mlflow.end_run()\n\n # Set params to default None\n self._run = None\n self._run_params = None\n self._run_id = None\n\n logger.info(f\"Current run is finished\")\n\n @retry_method(name=\"log_param\")\n def _set_model_as_deleted(self, run_id: str, experiment_id: str):\n with 
mlflow.start_run(\n run_id=run_id, experiment_id=experiment_id\n ) as run:\n mlflow.log_metric(\"model_deleted\", 1)\n mlflow.log_metric(\"model_uploaded\", 0)\n\n @retry_method(name=\"delete_model\")\n def _delete_model(self, run_id: str, experiment_id: str) -> bool:\n logger.warning(\n f\"Unable to delete a model for run {run_id}, MLFlow has no such functionality, please implement on your own.\"\n )\n return False\n\n @retry_method(name=\"get_run\")\n def delete_model(self, run_id: str, experiment_id: Optional[str] = None):\n experiment_id = (\n self._tuning_iteration_id\n if experiment_id is None\n else experiment_id\n )\n if experiment_id is None:\n raise ValueError(\n f\"No iteration was initialized, unable to delete model.\"\n )\n\n if experiment_id == INITIAL_EXPERIMENT_NAME:\n raise ValueError(f\"Initial model can't be deleted.\")\n\n run_info = None\n try:\n run_info = mlflow.get_run(run_id=run_id)\n except RestException as e:\n if e.get_http_status_code() == 404:\n logger.exception(f\"Run with ID {run_id} doesn't exist.\")\n else:\n raise e\n\n if run_info is not None:\n runs: pd.DataFrame = mlflow.search_runs(\n filter_string=self._get_model_exists_filter()\n )\n runs = runs[runs[\"run_id\"] == run_id]\n if runs.shape[0] == 0:\n logger.warning(\n f\"Run {run_id} has no model being uploaded. Nothing to delete\"\n )\n\n else:\n deleted = None\n try:\n deleted = self._delete_model(run_id, experiment_id)\n except MaxAttemptsReachedException:\n pass\n\n if deleted:\n self._set_model_as_deleted(run_id, experiment_id)\n\n @retry_method(name=\"log_model\")\n def save_model(\n self, model: EmbeddingsModelInterface, best_only: bool = True\n ):\n \"\"\"Save fine-tuned embedding model\n\n :param model: model to be saved\n :param best_only: save only if it's the best (default: True)\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"Can't save not initial model for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n logger.info(\n f\"Save model for {self._tuning_iteration_id} / {self._run_id}\"\n )\n if not best_only:\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n else:\n current_quality = self.get_quality()\n best_run_id, best_quality = self.get_best_quality()\n\n if best_run_id is None or (\n current_quality <= best_quality\n if self.is_loss\n else current_quality >= best_quality\n ):\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n\n if best_run_id is not None:\n self.delete_model(best_run_id)\n else:\n logger.info(\"Not the best run - ignore saving\")\n\n @retry_method(name=\"log_metric\")\n def save_metric(self, metric_value: MetricValue):\n \"\"\"Accumulate and save metric value\n\n :param metric_value: value to be logged\n \"\"\"\n for accumulator in self._accumulators:\n for name, value in accumulator.accumulate(metric_value):\n mlflow.log_metric(name, value)\n\n @retry_method(name=\"search_runs\")\n def get_quality(self) -> float:\n \"\"\"Current run quality value\n\n :return: quality value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n runs: 
pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id]\n )\n quality: np.ndarray = runs[runs.run_id == self._run_id][\n self._metric_field\n ]\n return float(quality) if quality.shape[0] == 1 else float(quality[0])\n\n @retry_method(name=\"search_runs\")\n def _get_best_quality(\n self, experiment_id: str\n ) -> Tuple[Optional[str], float]:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and not finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"No finished experiments found with model uploaded, except initial\"\n )\n return None, 0.0\n\n else:\n value: float = (\n runs[self._metric_field].min()\n if self.is_loss\n else runs[self._metric_field].max()\n )\n best: pd.DataFrame = runs[runs[self._metric_field] == value][\n [\"run_id\", self._metric_field]\n ]\n return list(best.itertuples(index=False, name=None))[0]\n\n def get_best_quality(self) -> Tuple[str, float]:\n \"\"\"Get current fine-tuning iteration best quality\n\n :return: run_id and best metric value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n return self._get_best_quality(self._tuning_iteration_id)" }, { "identifier": "FineTuningSettings", "path": "embedding_studio/workers/fine_tuning/experiments/finetuning_settings.py", "snippet": "class FineTuningSettings(BaseModel):\n \"\"\"\n\n :param loss_func: loss object for a ranking task\n :param metric_calculators: list of trackable metrics calculators (default: None)\n by default only DistanceShift metric\n :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity)\n :param is_similarity: is ranking function similarity like or distance (default: True)\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param step_size: optimizer steps (default: 500)\n :param gamma: optimizers gamma (default: 0.9)\n :param num_epochs: num of training epochs (default: 10)\n :param batch_size: count of sessions in a batch (default: 1)\n :param test_each_n_sessions: frequency of validation, if value in range [0, 1] - used as ratio (default: -1)\n \"\"\"\n\n loss_func: RankingLossInterface\n metric_calculators: Optional[List[MetricCalculator]] = None\n ranker: Optional[\n Callable[[FloatTensor, FloatTensor], FloatTensor]\n ] = COSINE_SIMILARITY\n is_similarity: Optional[bool] = True\n confidence_calculator: Optional[Callable] = dummy_confidences\n step_size: Optional[int] = 500\n gamma: Optional[float] = 0.9\n num_epochs: Optional[int] = 10\n batch_size: Optional[int] = 1\n test_each_n_sessions: Optional[Union[float, int]] = -1\n\n class Config:\n arbitrary_types_allowed = True" }, { "identifier": "INITIAL_PARAMS", "path": "embedding_studio/workers/fine_tuning/experiments/initial_params/clip.py", "snippet": "INITIAL_PARAMS: Dict[str, List[Union[int, float]]] = {\n \"num_fixed_layers\": [5, 6, 7, 8],\n \"query_lr\": [1e-4, 5e-5, 1e-5, 5e-6, 1e-6, 5e-7],\n \"items_lr\": [1e-4, 5e-5, 1e-5, 5e-6, 1e-6, 5e-7],\n \"query_weight_decay\": [0.0, 1e-6, 1e-5, 1e-4],\n \"items_weight_decay\": [0.0, 1e-6, 1e-5, 1e-4],\n \"margin\": [0.01, 0.025, 0.05],\n}" }, { "identifier": "MetricsAccumulator", "path": "embedding_studio/workers/fine_tuning/experiments/metrics_accumulator.py", "snippet": "class MetricsAccumulator:\n def __init__(\n self,\n name: str,\n calc_mean: bool = 
False,\n calc_sliding: bool = False,\n calc_min: bool = False,\n calc_max: bool = False,\n window_size: int = 10,\n ):\n \"\"\"Accumulator of metric values + calculator of aggregations like mean, max, min, sliding_mean.\n\n :param name: metric name (metrics with other name will be ignored)\n :param calc_mean: should accumulator calculate mean value (default: False)\n :param calc_sliding: should accumulator calculate sliding mean value (default: False)\n :param calc_min: should accumulator calculate min value (default: False)\n :param calc_max: should accumulator calculate max value (default: False)\n :param window_size: size of sliding window (default: 10)\n \"\"\"\n if not isinstance(name, str) or len(name) == 0:\n raise ValueError(\"MetricsAccumulator's name should not be empty\")\n\n self._name = name\n\n if not isinstance(calc_mean, bool):\n raise ValueError(\"calc_mean value should be bool\")\n self._calc_mean = calc_mean\n\n if not isinstance(calc_sliding, bool):\n raise ValueError(\"calc_sliding value should be bool\")\n self._calc_sliding = calc_sliding\n\n if not isinstance(calc_min, bool):\n raise ValueError(\"calc_min value should be bool\")\n self._calc_min = calc_min\n\n if not isinstance(calc_max, bool):\n raise ValueError(\"calc_max value should be bool\")\n self._calc_max = calc_max\n\n if not isinstance(window_size, int) or window_size <= 1:\n raise ValueError(\n \"window_size value should be integer with value more than 1\"\n )\n\n self._window_size = window_size\n self._values = []\n\n @property\n def name(self) -> str:\n return self._name\n\n def clear(self):\n \"\"\"Clear accumulator\"\"\"\n self._values = []\n\n def accumulate(self, value: MetricValue) -> List[Tuple[str, float]]:\n \"\"\"Add metric value to an accumulator.\n\n :param value: metric to be accumulated\n :return: aggregations\n \"\"\"\n if self.name == value.name:\n self._values.append(value.value)\n\n return self.aggregate()\n\n return []\n\n def aggregate(self) -> List[Tuple[str, float]]:\n \"\"\"Aggregate accumulated metrics\n\n :return: metric aggregations (last, mean, sliding, min, max)\n \"\"\"\n aggregations: List[Tuple[str, float]] = []\n if len(self._values) > 0:\n aggregations.append((self.name, self._values[-1]))\n if self._calc_mean:\n aggregations.append(\n (f\"mean_{self.name}\", float(np.mean(self._values)))\n )\n\n if self._calc_sliding:\n slide_value = float(\n np.mean(self._values)\n if len(self._values) < self._window_size\n else np.mean(self._values[-self._window_size :])\n )\n aggregations.append((f\"sliding_{self.name}\", slide_value))\n\n if self._calc_min:\n aggregations.append((f\"min_{self.name}\", np.min(self._values)))\n\n if self._calc_max:\n aggregations.append((f\"max_{self.name}\", np.max(self._values)))\n\n return aggregations" } ]
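The final context snippet above documents MetricsAccumulator's aggregation behaviour: it keeps the latest value and can additionally report mean, sliding mean over a fixed window, min and max. A minimal usage sketch of that API, assuming the embedding_studio package from the import block below is installed; the MetricValue dataclass here is a stand-in for the real container (only the .name and .value attributes read by accumulate() are modelled):

from dataclasses import dataclass

from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import (
    MetricsAccumulator,
)


@dataclass
class MetricValue:
    # Stand-in for the real metric container; accumulate() only reads .name and .value.
    name: str
    value: float


acc = MetricsAccumulator(
    "train_loss",
    calc_mean=True,
    calc_sliding=True,
    calc_min=True,
    calc_max=True,
    window_size=3,
)

aggregations = []
for loss in [0.9, 0.7, 0.8, 0.5]:
    # Values whose name does not match "train_loss" would be ignored and return [].
    aggregations = acc.accumulate(MetricValue(name="train_loss", value=loss))

# Latest value plus mean_/sliding_/min_/max_-prefixed aggregations, roughly:
# [('train_loss', 0.5), ('mean_train_loss', 0.725), ('sliding_train_loss', ~0.667),
#  ('min_train_loss', 0.5), ('max_train_loss', 0.9)]
print(aggregations)

In the plugin code further down, a list of such accumulators is built for exactly this purpose; ExperimentsManager.save_metric() logs every aggregation they produce to MLflow.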
from typing import List

from sentence_transformers import SentenceTransformer

from embedding_studio.core.config import settings
from embedding_studio.core.plugin import FineTuningMethod
from embedding_studio.embeddings.data.clickstream.parsers.s3_parser import (
    AWSS3ClickstreamParser,
)
from embedding_studio.embeddings.data.clickstream.search_event import (
    DummyEventType,
    SearchResult,
)
from embedding_studio.embeddings.data.clickstream.splitter import (
    ClickstreamSessionsSplitter,
)
from embedding_studio.embeddings.data.clickstream.text_query_item import (
    TextQueryItem,
)
from embedding_studio.embeddings.data.clickstream.text_query_retriever import (
    TextQueryRetriever,
)
from embedding_studio.embeddings.data.loaders.s3.s3_loader import (
    AWSS3DataLoader,
)
from embedding_studio.embeddings.data.storages.producers.clip import (
    CLIPItemStorageProducer,
)
from embedding_studio.embeddings.data.utils.fields_normalizer import (
    DatasetFieldsNormalizer,
)
from embedding_studio.embeddings.losses.prob_cosine_margin_ranking_loss import (
    CosineProbMarginRankingLoss,
)
from embedding_studio.embeddings.models.text_to_image.clip import (
    TextToImageCLIPModel,
)
from embedding_studio.models.clickstream.sessions import SessionWithEvents
from embedding_studio.models.plugin import FineTuningBuilder, PluginMeta
from embedding_studio.workers.fine_tuning.data.prepare_data import prepare_data
from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import (
    ExperimentsManager,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import (
    FineTuningSettings,
)
from embedding_studio.workers.fine_tuning.experiments.initial_params.clip import (
    INITIAL_PARAMS,
)
from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import (
    MetricsAccumulator,
)
16,704
class DefaultFineTuningMethod(FineTuningMethod):
    meta = PluginMeta(
        name="Default Fine Tuning Method",
        version="0.0.1",
        description="A default fine-tuning plugin",
    )

    def __init__(self):
        # uncomment and pass your credentials to use your own s3 bucket
        # creds = {
        #     "role_arn": "arn:aws:iam::123456789012:role/some_data"
        #     "aws_access_key_id": "TESTACCESSKEIDTEST11",
        #     "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03",
        # }
        # self.data_loader = AWSS3DataLoader(**creds)

        # with empty creds, use anonymous session
        creds = {}
        self.data_loader = AWSS3DataLoader(**creds)
        self.retriever = TextQueryRetriever()
        self.parser = AWSS3ClickstreamParser(
            TextQueryItem, SearchResult, DummyEventType
        )
        self.splitter = ClickstreamSessionsSplitter()
        self.normalizer = DatasetFieldsNormalizer("item", "item_id")
        self.storage_producer = CLIPItemStorageProducer(self.normalizer)
        self.accumulators = [
            MetricsAccumulator("train_loss", True, True, True, True),
            MetricsAccumulator(
                "train_not_irrelevant_dist_shift", True, True, True, True
            ),
            MetricsAccumulator(
                "train_irrelevant_dist_shift", True, True, True, True
            ),
            MetricsAccumulator("test_loss"),
            MetricsAccumulator("test_not_irrelevant_dist_shift"),
            MetricsAccumulator("test_irrelevant_dist_shift"),
        ]
        self.manager = ExperimentsManager(
class DefaultFineTuningMethod(FineTuningMethod):
    meta = PluginMeta(
        name="Default Fine Tuning Method",
        version="0.0.1",
        description="A default fine-tuning plugin",
    )

    def __init__(self):
        # uncomment and pass your credentials to use your own s3 bucket
        # creds = {
        #     "role_arn": "arn:aws:iam::123456789012:role/some_data"
        #     "aws_access_key_id": "TESTACCESSKEIDTEST11",
        #     "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03",
        # }
        # self.data_loader = AWSS3DataLoader(**creds)

        # with empty creds, use anonymous session
        creds = {}
        self.data_loader = AWSS3DataLoader(**creds)
        self.retriever = TextQueryRetriever()
        self.parser = AWSS3ClickstreamParser(
            TextQueryItem, SearchResult, DummyEventType
        )
        self.splitter = ClickstreamSessionsSplitter()
        self.normalizer = DatasetFieldsNormalizer("item", "item_id")
        self.storage_producer = CLIPItemStorageProducer(self.normalizer)
        self.accumulators = [
            MetricsAccumulator("train_loss", True, True, True, True),
            MetricsAccumulator(
                "train_not_irrelevant_dist_shift", True, True, True, True
            ),
            MetricsAccumulator(
                "train_irrelevant_dist_shift", True, True, True, True
            ),
            MetricsAccumulator("test_loss"),
            MetricsAccumulator("test_not_irrelevant_dist_shift"),
            MetricsAccumulator("test_irrelevant_dist_shift"),
        ]
        self.manager = ExperimentsManager(
tracking_uri=settings.MLFLOW_TRACKING_URI,
0
2023-10-31 00:33:13+00:00
24k
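Taken together, the fields of this record define a single next-line completion task: retrieved cross-file context snippets, the file's import block, a code prefix truncated mid-call, and the gold continuation tracking_uri=settings.MLFLOW_TRACKING_URI,. A minimal scoring sketch, assuming a record dict keyed by the dataset's column names (how a row is actually loaded is up to the consumer) and a hypothetical generate callable wrapping the model under test:

from typing import Callable, Dict


def build_prompt(record: Dict) -> str:
    # Join the retrieved cross-file snippets, the import block and the in-file
    # prefix; the exact template is a choice of the evaluation harness, not
    # something fixed by the dataset itself.
    context = "\n\n".join(entry["snippet"] for entry in record["context"])
    return "\n\n".join(
        [context, record["import_statement"], record["cropped_code"]]
    )


def next_line_exact_match(record: Dict, generate: Callable[[str], str]) -> bool:
    # `generate` is a hypothetical callable that returns the model's predicted
    # next line for the given prompt.
    prediction = generate(build_prompt(record)).strip()
    return prediction == record["next_line"].strip()

Exact match is the simplest possible criterion; fuzzier metrics such as edit similarity are just as easy to plug in at the final comparison step.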
facebookresearch/minimax
src/minimax/runners/xp_runner.py
[ { "identifier": "EvalRunner", "path": "src/minimax/runners/eval_runner.py", "snippet": "class EvalRunner:\n def __init__(\n self,\n pop,\n env_names,\n env_kwargs=None,\n n_episodes=10,\n agent_idxs='*',\n render_mode=None):\n\n self.pop = pop\n\n if isinstance(agent_idxs, str):\n if \"*\" in agent_idxs:\n self.agent_idxs = np.arange(pop.n_agents)\n else:\n self.agent_idxs = \\\n np.array([int(x) for x in agent_idxs.split(',')])\n else:\n self.agent_idxs = agent_idxs # assume array\n\n assert np.max(self.agent_idxs) < pop.n_agents, \\\n 'Agent index is out of bounds.'\n\n if isinstance(env_names, str):\n env_names = [\n x.strip() for x in env_names.split(',')\n ]\n\n self.n_episodes = n_episodes\n env_infos = create_envs_for_kwargs(env_names, env_kwargs)\n env_names = []\n self.ext_env_names = []\n env_kwargs = []\n for (name, ext_name, kwargs) in env_infos:\n env_names.append(name)\n self.ext_env_names.append(ext_name)\n env_kwargs.append(kwargs)\n self.n_envs = len(env_names)\n\n self.benvs = []\n self.env_params = []\n self.env_has_solved_rate = []\n for env_name, kwargs in zip(env_names, env_kwargs):\n benv = envs.BatchEnv(\n env_name=env_name,\n n_parallel=n_episodes,\n n_eval=1,\n env_kwargs=kwargs,\n wrappers=['monitor_return', 'monitor_ep_metrics']\n )\n self.benvs.append(benv)\n self.env_params.append(benv.env.params)\n self.env_has_solved_rate.append(benv.env.eval_solved_rate is not None)\n\n self.action_dtype = self.benvs[0].env.action_space().dtype\n\n monitored_metrics = self.benvs[0].env.get_monitored_metrics()\n self.rolling_stats = RollingStats(names=monitored_metrics, window=1)\n self._update_ep_stats = jax.vmap(\n jax.vmap(\n self.rolling_stats.update_stats, in_axes=(0,0,0,None)),\n in_axes=(0,0,0,None))\n\n self.test_return_pre = 'test_return'\n self.test_solved_rate_pre = 'test_solved_rate'\n\n self.render_mode = render_mode\n if render_mode:\n from minimax.envs.viz.grid_viz import GridVisualizer\n self.viz = GridVisualizer()\n self.viz.show()\n\n if render_mode == 'ipython':\n from IPython import display\n self.ipython_display = display\n\n def load_checkpoint_state(self, runner_state, state):\n runner_state = list(runner_state)\n runner_state[1] = runner_state[1].load_state_dict(state[1])\n\n return tuple(runner_state)\n\n @partial(jax.jit, static_argnums=(0,2))\n def _get_transition(\n self,\n rng,\n benv,\n params,\n state,\n obs,\n carry,\n zero_carry,\n extra):\n value, pi_params, next_carry = self.pop.act(params, obs, carry)\n pi = self.pop.get_action_dist(pi_params, dtype=self.action_dtype)\n rng, subrng = jax.random.split(rng)\n action = pi.sample(seed=subrng)\n log_pi = pi.log_prob(action)\n\n rng, *vrngs = jax.random.split(rng, self.pop.n_agents+1)\n\n step_args = (jnp.array(vrngs), state, action, extra)\n (next_obs, \n next_state, \n reward, \n done, \n info, \n extra) = benv.step(*step_args)\n\n # Add transition to storage\n step = (obs, action, reward, done, log_pi, value)\n if carry is not None:\n step += (carry,)\n\n # Zero carry if needed\n if carry is not None:\n next_carry = jax.vmap(_tree_util.pytree_select)(\n done, zero_carry, next_carry)\n\n if self.render_mode:\n self.viz.render(\n benv.env.params, \n jax.tree_util.tree_map(lambda x: x[0][0], state))\n if self.render_mode == 'ipython':\n self.ipython_display.display(self.viz.window.fig)\n self.ipython_display.clear_output(wait=True)\n\n return next_state, next_obs, next_carry, done, info, extra\n\n @partial(jax.jit, static_argnums=(0, 2))\n def _rollout_benv(\n self, \n rng, \n benv,\n 
params,\n env_params,\n state,\n obs,\n carry,\n zero_carry,\n extra,\n ep_stats):\n\n def _scan_rollout(scan_carry, rng):\n (state, \n obs, \n carry,\n extra, \n ep_stats) = scan_carry\n \n step = \\\n self._get_transition(\n rng,\n benv,\n params, \n state, \n obs, \n carry, \n zero_carry,\n extra)\n\n (next_state, \n next_obs, \n next_carry, \n done, \n info, \n extra) = step\n\n ep_stats = self._update_ep_stats(ep_stats, done, info, 1)\n\n return (next_state, next_obs, next_carry, extra, ep_stats), None\n\n n_steps = benv.env.max_episode_steps()\n rngs = jax.random.split(rng, n_steps)\n (state, \n obs, \n carry, \n extra,\n ep_stats),_ = jax.lax.scan(\n _scan_rollout,\n (state, obs, carry, extra, ep_stats),\n rngs,\n length=n_steps)\n\n return ep_stats\n\n @partial(jax.jit, static_argnums=(0,))\n def run(self, rng, params):\n \"\"\"\n Rollout agents on each env. \n\n For each env, run n_eval episodes in parallel, \n where each is indexed to return in order.\n \"\"\"\n eval_stats = self.fake_run(rng, params)\n rng, *rollout_rngs = jax.random.split(rng, self.n_envs+1)\n for i, (benv, env_param) in enumerate(zip(self.benvs, self.env_params)):\n rng, *reset_rngs = jax.random.split(rng, self.pop.n_agents+1)\n obs, state, extra = benv.reset(jnp.array(reset_rngs))\n\n if self.pop.agent.is_recurrent:\n rng, subrng = jax.random.split(rng)\n zero_carry = self.pop.init_carry(subrng, obs)\n else:\n zero_carry = None\n\n # Reset episodic stats\n ep_stats = self.rolling_stats.reset_stats(\n batch_shape=(self.pop.n_agents, self.n_episodes))\n\n ep_stats = self._rollout_benv(\n rollout_rngs[i],\n benv,\n jax.lax.stop_gradient(params), \n env_param, \n state, \n obs,\n zero_carry,\n zero_carry,\n extra,\n ep_stats)\n \n env_name = self.ext_env_names[i]\n mean_return = ep_stats['return'].mean(1)\n\n if self.env_has_solved_rate[i]:\n mean_solved_rate = jax.vmap(jax.vmap(benv.env.eval_solved_rate))(ep_stats).mean(1)\n\n for idx in self.agent_idxs:\n eval_stats[f'eval/a{idx}:{self.test_return_pre}:{env_name}'] = mean_return[idx].squeeze()\n if self.env_has_solved_rate[i]:\n eval_stats[f'eval/a{idx}:{self.test_solved_rate_pre}:{env_name}'] = mean_solved_rate[idx].squeeze()\n\n return eval_stats\n\n def fake_run(self, rng, params):\n eval_stats = {}\n for i, env_name in enumerate(self.ext_env_names):\n for idx in self.agent_idxs:\n eval_stats.update({\n f'eval/a{idx}:{self.test_return_pre}:{env_name}':0.\n })\n if self.env_has_solved_rate[i]:\n eval_stats.update({\n f'eval/a{idx}:{self.test_solved_rate_pre}:{env_name}':0.,\n })\n\n return eval_stats" }, { "identifier": "DRRunner", "path": "src/minimax/runners/dr_runner.py", "snippet": "class DRRunner:\n\t\"\"\"\n\tOrchestrates rollouts across one or more students. 
\n\tThe main components at play:\n\t- AgentPop: Manages train state and batched inference logic \n\t\tfor a population of agents.\n\t- BatchEnv: Manages environment step and reset logic, using a \n\t\tpopulaton of agents.\n\t- RolloutStorage: Manages the storing and sampling of collected txns.\n\t- PPO: Handles PPO updates, which take a train state + batch of txns.\n\t\"\"\"\n\tdef __init__(\n\t\tself, \n\t\tenv_name,\n\t\tenv_kwargs,\n\t\tstudent_agents,\n\t\tn_students=1,\n\t\tn_parallel=1,\n\t\tn_eval=1,\n\t\tn_rollout_steps=256,\n\t\tlr=1e-4,\n\t\tlr_final=None,\n\t\tlr_anneal_steps=0,\n\t\tmax_grad_norm=0.5,\n\t\tdiscount=0.99,\n\t\tgae_lambda=0.95,\n\t\tadam_eps=1e-5,\n\t\tnormalize_return=False,\n\t\ttrack_env_metrics=False,\n\t\tn_unroll_rollout=1,\n\t\tn_devices=1,\n\t\trender=False):\n\n\t\tassert len(student_agents) == 1, 'Only one type of student supported.'\n\t\tassert n_parallel % n_devices == 0, 'Num envs must be divisible by num devices.'\n\n\t\tself.n_students = n_students\n\t\tself.n_parallel = n_parallel // n_devices\n\t\tself.n_eval = n_eval\n\t\tself.n_devices = n_devices\n\t\tself.step_batch_size = n_students*n_eval*n_parallel\n\t\tself.n_rollout_steps = n_rollout_steps\n\t\tself.n_updates = 0\n\t\tself.lr = lr\n\t\tself.lr_final = lr if lr_final is None else lr_final\n\t\tself.lr_anneal_steps = lr_anneal_steps\n\t\tself.max_grad_norm = max_grad_norm\n\t\tself.adam_eps = adam_eps\n\t\tself.normalize_return = normalize_return\n\t\tself.track_env_metrics = track_env_metrics\n\t\tself.n_unroll_rollout = n_unroll_rollout\n\t\tself.render = render\n\n\t\tself.student_pop = AgentPop(student_agents[0], n_agents=n_students)\n\n\t\tself.env, self.env_params = envs.make(\n\t\t\tenv_name, \n\t\t\tenv_kwargs=env_kwargs\n\t\t)\n\t\tself._action_shape = self.env.action_space().shape\n\n\t\tself.benv = envs.BatchEnv(\n\t\t\tenv_name=env_name,\n\t\t\tn_parallel=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\tenv_kwargs=env_kwargs,\n\t\t\twrappers=['monitor_return', 'monitor_ep_metrics']\n\t\t)\n\t\tself.action_dtype = self.benv.env.action_space().dtype\n\n\t\tself.student_rollout = RolloutStorage(\n\t\t\tdiscount=discount,\n\t\t\tgae_lambda=gae_lambda,\n\t\t\tn_steps=n_rollout_steps,\n\t\t\tn_agents=n_students,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\taction_space=self.env.action_space(),\n\t\t\tobs_space=self.env.observation_space(),\n\t\t\tagent=self.student_pop.agent,\n\t\t)\n\n\t\tmonitored_metrics = self.benv.env.get_monitored_metrics()\n\t\tself.rolling_stats = RollingStats(\n\t\t\tnames=monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\t\tself._update_ep_stats = jax.vmap(jax.vmap(self.rolling_stats.update_stats))\n\n\t\tif self.render:\n\t\t\tfrom envs.viz.grid_viz import GridVisualizer\n\t\t\tself.viz = GridVisualizer()\n\t\t\tself.viz.show()\n\n\tdef reset(self, rng):\n\t\tself.n_updates = 0\n\n\t\tn_parallel = self.n_parallel*self.n_devices\n\n\t\trngs, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs), n_parallel=n_parallel)\n\t\tdummy_obs = jax.tree_util.tree_map(lambda x: x[0], obs) # for one agent only\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.student_pop.init_carry(subrng, obs)\n\t\t\tself.zero_carry = jax.tree_map(lambda x: x.at[:,:self.n_parallel].get(), carry)\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tparams = self.student_pop.init_params(subrng, dummy_obs)\n\n\t\tschedule_fn = 
optax.linear_schedule(\n\t\t\tinit_value=-float(self.lr),\n\t\t\tend_value=-float(self.lr_final),\n\t\t\ttransition_steps=self.lr_anneal_steps,\n\t\t)\n\n\t\ttx = optax.chain(\n\t\t\toptax.clip_by_global_norm(self.max_grad_norm),\n\t\t\toptax.adam(learning_rate=float(self.lr), eps=self.adam_eps)\n\t\t)\n\n\t\ttrain_state = VmapTrainState.create(\n\t\t\tapply_fn=self.student_pop.agent.evaluate,\n\t\t\tparams=params,\n\t\t\ttx=tx\n\t\t)\n\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(self.n_students, n_parallel*self.n_eval))\n\n\t\tstart_state = state\n\n\t\treturn (\n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate,\n\t\t\tstart_state, # Used to track metrics from starting state\n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats\n\t\t)\n\n\tdef get_checkpoint_state(self, state):\n\t\t_state = list(state)\n\t\t_state[1] = state[1].state_dict\n\n\t\treturn _state\n\n\tdef load_checkpoint_state(self, runner_state, state):\n\t\trunner_state = list(runner_state)\n\t\trunner_state[1] = runner_state[1].load_state_dict(state[1])\n\n\t\treturn tuple(runner_state)\n\n\t@partial(jax.jit, static_argnums=(0,2))\n\tdef _get_transition(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\tparams, \n\t\trollout, \n\t\tstate, \n\t\tstart_state, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\textra=None):\n\t\t# Sample action\n\t\tvalue, pi_params, next_carry = pop.act(params, obs, carry, done)\n\n\t\tpi = pop.get_action_dist(pi_params, dtype=self.action_dtype)\n\t\trng, subrng = jax.random.split(rng)\n\t\taction = pi.sample(seed=subrng)\n\t\tlog_pi = pi.log_prob(action)\n\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\t(next_obs, \n\t\t next_state, \n\t\t reward, \n\t\t done, \n\t\t info, \n\t\t extra) = self.benv.step(jnp.array(vrngs), state, action, extra)\n\n\t\tnext_start_state = jax.vmap(_tree_util.pytree_select)(\n\t\t\tdone, next_state, start_state\n\t\t)\n\n\t\t# Add transition to storage\n\t\tstep = (obs, action, reward, done, log_pi, value)\n\t\tif carry is not None:\n\t\t\tstep += (carry,)\n\n\t\trollout = self.student_rollout.append(rollout, *step)\n\n\t\tif self.render:\n\t\t\tself.viz.render(\n\t\t\t\tself.benv.env.params, \n\t\t\t\tjax.tree_util.tree_map(lambda x: x[0][0], state))\n\n\t\treturn (\n\t\t\trollout, \n\t\t\tnext_state,\n\t\t\tnext_start_state, \n\t\t\tnext_obs, \n\t\t\tnext_carry, \n\t\t\tdone, \n\t\t\tinfo, \n\t\t\textra\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _rollout_students(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\textra=None, \n\t\tep_stats=None):\n\t\trollout = self.student_rollout.reset()\n\n\t\trngs = jax.random.split(rng, self.n_rollout_steps)\n\n\t\tdef _scan_rollout(scan_carry, rng):\n\t\t\trollout, state, start_state, obs, carry, done, extra, ep_stats, train_state = scan_carry \n\n\t\t\tnext_scan_carry = \\\n\t\t\t\tself._get_transition(\n\t\t\t\t\trng, \n\t\t\t\t\tself.student_pop, \n\t\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\t\trollout, \n\t\t\t\t\tstate,\n\t\t\t\t\tstart_state, \n\t\t\t\t\tobs, \n\t\t\t\t\tcarry,\n\t\t\t\t\tdone, \n\t\t\t\t\textra)\n\t\t\t(rollout, \n\t\t\t next_state,\n\t\t\t next_start_state, \n\t\t\t next_obs, \n\t\t\t next_carry, \n\t\t\t done, \n\t\t\t info, \n\t\t\t extra) = next_scan_carry\n\n\t\t\tep_stats = self._update_ep_stats(ep_stats, done, info)\n\n\t\t\treturn (\n\t\t\t\trollout, \n\t\t\t\tnext_state,\n\t\t\t\tnext_start_state,\n\t\t\t\tnext_obs, \n\t\t\t\tnext_carry,\n\t\t\t\tdone,\n\t\t\t\textra, 
\n\t\t\t\tep_stats,\n\t\t\t\ttrain_state), None\n\n\t\t(rollout, \n\t\t state, \n\t\t start_state, \n\t\t obs, \n\t\t carry, \n\t\t done,\n\t\t extra, \n\t\t ep_stats,\n\t\t train_state), _ = jax.lax.scan(\n\t\t\t_scan_rollout,\n\t\t\t(rollout, \n\t\t\t state, \n\t\t\t start_state,\n\t\t\t obs, \n\t\t\t carry, \n\t\t\t done,\n\t\t\t extra, \n\t\t\t ep_stats,\n\t\t\t train_state),\n\t\t\trngs,\n\t\t\tlength=self.n_rollout_steps,\n\t\t)\n\n\t\treturn rollout, state, start_state, obs, carry, extra, ep_stats, train_state\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, update_stats, ep_stats, env_metrics=None):\n\t\tstats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ep_stats[k] for k in self.rolling_stats.names}\n\t\t)\n\t\tstats.update(update_stats)\n\n\t\tif self.n_students > 1:\n\t\t\t_stats = {}\n\t\t\tfor i in range(self.n_students):\n\t\t\t\t_student_stats = jax.tree_util.tree_map(lambda x: x[i], stats) # for agent0\n\t\t\t\t_stats.update({f'a{i}/{k}':v for k,v in _student_stats.items()})\n\t\t\tstats = _stats\n\n\t\tif self.track_env_metrics:\n\t\t\tmean_env_metrics = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(env_metrics)\n\t\t\tmean_env_metrics = {f'env/{k}':v for k,v in mean_env_metrics.items()}\n\n\t\t\tif self.n_students > 1:\n\t\t\t\t_env_metrics = {}\n\t\t\t\tfor i in range(self.n_students):\n\t\t\t\t\t_student_env_metrics = jax.tree_util.tree_map(lambda x: x[i], mean_env_metrics) # for agent0\n\t\t\t\t\t_env_metrics.update({f'{k}_a{i}':v for k,v in _student_env_metrics.items()})\n\t\t\t\tmean_env_metrics = _env_metrics\n\n\t\t\tstats.update(mean_env_metrics)\n\n\t\tif self.n_students == 1:\n\t\t\tstats = jax.tree_map(lambda x: x[0], stats)\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\tdef get_shmap_spec(self):\n\t\trunner_state_size = len(inspect.signature(self.run).parameters)\n\t\tin_spec = [P(None,'device'),]*(runner_state_size)\n\t\tout_spec = [P(None,'device'),]*(runner_state_size)\n\n\t\tin_spec[:2] = [P(None),]*2\n\t\tin_spec = tuple(in_spec)\n\t\tout_spec = (P(None),) + in_spec\n\n\t\treturn in_spec, out_spec\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state,\n\t\tobs, \n\t\tcarry=None, \n\t\textra=None, \n\t\tep_stats=None):\n\t\t\"\"\"\n\t\tPerform one update step: rollout all students and teachers + update with PPO\n\t\t\"\"\"\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\trollout_batch_shape = (self.n_students, self.n_parallel*self.n_eval)\n\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs))\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=rollout_batch_shape)\n\n\t\trollout_start_state = state\n\n\t\tdone = jnp.zeros(rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\trollout, state, start_state, obs, carry, extra, ep_stats, train_state = \\\n\t\t\tself._rollout_students(\n\t\t\t\tsubrng, \n\t\t\t\ttrain_state, \n\t\t\t\tstate, \n\t\t\t\tstart_state,\n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\textra, \n\t\t\t\tep_stats\n\t\t\t)\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry,\n\t\t\t)\n\t\t)\n\n\t\t# PPOAgent vmaps over 
the train state and batch. Batch must be N x EM\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self.student_pop.update(subrng, train_state, train_batch)\n\n\t\t# Collect env metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(rollout_start_state)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tstats = self._compile_stats(update_stats, ep_stats, env_metrics)\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\ttrain_state = train_state.increment()\n\t\tself.n_updates += 1\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats\n\t\t)" }, { "identifier": "PAIREDRunner", "path": "src/minimax/runners/paired_runner.py", "snippet": "class PAIREDRunner:\n\t\"\"\"\n\tOrchestrates rollouts across one or more students and teachers. \n\tThe main components at play:\n\t- AgentPop: Manages train state and batched inference logic \n\t\tfor a population of agents.\n\t- BatchUEDEnv: Manages environment step and reset logic for a \n\t\tpopulation of agents batched over a pair of student and \n\t\tteacher MDPs.\n\t- RolloutStorage: Manages the storing and sampling of collected txns.\n\t- PPO: Handles PPO updates, which take a train state + batch of txns.\n\t\"\"\"\n\tdef __init__(\n\t\tself, \n\t\tenv_name,\n\t\tenv_kwargs,\n\t\tued_env_kwargs,\n\t\tstudent_agents,\n\t\tn_students=2,\n\t\tn_parallel=1,\n\t\tn_eval=1,\n\t\tn_rollout_steps=250,\n\t\tlr=1e-4,\n\t\tlr_final=None,\n\t\tlr_anneal_steps=0,\n\t\tmax_grad_norm=0.5,\n\t\tdiscount=0.99,\n\t\tgae_lambda=0.95,\n\t\tadam_eps=1e-5,\n\t\tteacher_lr=None,\n\t\tteacher_lr_final=None,\n\t\tteacher_lr_anneal_steps=None,\n\t\tteacher_discount=0.99,\n\t\tteacher_gae_lambda=0.95,\n\t\tteacher_agents=None,\n\t\tued_score='relative_regret',\n\t\ttrack_env_metrics=False,\n\t\tn_unroll_rollout=1,\n\t\trender=False,\n\t\tn_devices=1):\n\t\tassert n_parallel % n_devices == 0, 'Num envs must be divisible by num devices.'\n\n\t\tued_score = UEDScore[ued_score.upper()]\n\n\t\tassert len(student_agents) == 1, \\\n\t\t\t'Only one type of student supported.'\n\t\tassert not (n_students > 2 and ued_score in [UEDScore.RELATIVE_REGRET, UEDScore.MEAN_RELATIVE_REGRET]), \\\n\t\t\t'Standard PAIRED uses only 2 students.'\n\t\tassert teacher_agents is None or len(teacher_agents) == 1, \\\n\t\t\t'Only one type of teacher supported.'\n\n\t\tself.n_students = n_students\n\t\tself.n_parallel = n_parallel // n_devices\n\t\tself.n_eval = n_eval\n\t\tself.n_devices = n_devices\n\t\tself.step_batch_size = n_students*n_eval*n_parallel\n\t\tself.n_rollout_steps = n_rollout_steps\n\t\tself.n_updates = 0\n\t\tself.lr = lr\n\t\tself.lr_final = lr if lr_final is None else lr_final\n\t\tself.lr_anneal_steps = lr_anneal_steps\n\t\tself.teacher_lr = \\\n\t\t\tlr if teacher_lr is None else lr\n\t\tself.teacher_lr_final = \\\n\t\t\tself.lr_final if teacher_lr_final is None else teacher_lr_final\n\t\tself.teacher_lr_anneal_steps = \\\n\t\t\tlr_anneal_steps if teacher_lr_anneal_steps is None else teacher_lr_anneal_steps\n\t\tself.max_grad_norm = max_grad_norm\n\t\tself.adam_eps = adam_eps\n\t\tself.ued_score = ued_score\n\t\tself.track_env_metrics = track_env_metrics\n\n\t\tself.n_unroll_rollout = n_unroll_rollout\n\t\tself.render = render\n\n\t\tself.student_pop = AgentPop(student_agents[0], n_agents=n_students)\n\n\t\tif teacher_agents is not None:\n\t\t\tself.teacher_pop = AgentPop(teacher_agents[0], n_agents=1)\n\n\t\t# This 
ensures correct partial-episodic bootstrapping by avoiding\n\t\t# any termination purely due to timeouts.\n\t\t# env_kwargs.max_episode_steps = self.n_rollout_steps + 1\n\t\tself.benv = envs.BatchUEDEnv(\n\t\t\tenv_name=env_name,\n\t\t\tn_parallel=self.n_parallel,\n\t\t\tn_eval=n_eval,\n\t\t\tenv_kwargs=env_kwargs,\n\t\t\tued_env_kwargs=ued_env_kwargs,\n\t\t\twrappers=['monitor_return', 'monitor_ep_metrics'],\n\t\t\tued_wrappers=[]\n\t\t)\n\t\tself.teacher_n_rollout_steps = \\\n\t\t\tself.benv.env.ued_max_episode_steps()\n\n\t\tself.student_rollout = RolloutStorage(\n\t\t\tdiscount=discount,\n\t\t\tgae_lambda=gae_lambda,\n\t\t\tn_steps=n_rollout_steps,\n\t\t\tn_agents=n_students,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\taction_space=self.benv.env.action_space(),\n\t\t\tobs_space=self.benv.env.observation_space(),\n\t\t\tagent=self.student_pop.agent\n\t\t)\n\n\t\tself.teacher_rollout = RolloutStorage(\n\t\t\tdiscount=teacher_discount,\n\t\t\tgae_lambda=teacher_gae_lambda,\n\t\t\tn_steps=self.teacher_n_rollout_steps,\n\t\t\tn_agents=1,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=1,\n\t\t\taction_space=self.benv.env.ued_action_space(),\n\t\t\tobs_space=self.benv.env.ued_observation_space(),\n\t\t\tagent=self.teacher_pop.agent,\n\t\t)\n\n\t\tued_monitored_metrics = ('return',)\n\t\tself.ued_rolling_stats = RollingStats(\t\n\t\t\tnames=ued_monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\t\t\n\t\tmonitored_metrics = self.benv.env.get_monitored_metrics()\n\t\tself.rolling_stats = RollingStats(\n\t\t\tnames=monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\n\t\tself._update_ep_stats = jax.vmap(jax.vmap(self.rolling_stats.update_stats))\n\t\tself._update_ued_ep_stats = jax.vmap(jax.vmap(self.ued_rolling_stats.update_stats))\n\n\t\tif self.render:\n\t\t\tfrom envs.viz.grid_viz import GridVisualizer\n\t\t\tself.viz = GridVisualizer()\n\t\t\tself.viz.show()\n\n\tdef reset(self, rng):\n\t\tself.n_updates = 0\n\n\t\tn_parallel = self.n_parallel*self.n_devices\n\n\t\trng, student_rng, teacher_rng = jax.random.split(rng,3)\n\t\tstudent_info = self._reset_pop(\n\t\t\t\tstudent_rng, \n\t\t\t\tself.student_pop, \n\t\t\t\tpartial(self.benv.reset, sub_batch_size=n_parallel*self.n_eval),\n\t\t\t\tn_parallel_ep=n_parallel*self.n_eval,\n\t\t\t\tlr_init=self.lr,\n\t\t\t\tlr_final=self.lr_final,\n\t\t\t\tlr_anneal_steps=self.lr_anneal_steps)\n\n\t\tteacher_info = self._reset_pop(\n\t\t\tteacher_rng, \n\t\t\tself.teacher_pop, \n\t\t\tpartial(self.benv.reset_teacher, n_parallel=n_parallel),\n\t\t\tn_parallel_ep=n_parallel,\n\t\t\tlr_init=self.teacher_lr,\n\t\t\tlr_final=self.teacher_lr_final,\n\t\t\tlr_anneal_steps=self.teacher_lr_anneal_steps)\n\n\t\treturn (\n\t\t\trng,\n\t\t\t*student_info,\n\t\t\t*teacher_info\n\t\t)\n\n\tdef _reset_pop(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\tenv_reset_fn, \n\t\tn_parallel_ep=1,\n\t\tlr_init=3e-4,\n\t\tlr_final=3e-4,\n\t\tlr_anneal_steps=0):\n\t\trng, *vrngs = jax.random.split(rng, pop.n_agents+1)\n\t\treset_out = env_reset_fn(jnp.array(vrngs))\n\t\tif len(reset_out) == 2:\n\t\t\tobs, state = reset_out\n\t\telse:\n\t\t\tobs, state, extra = reset_out\n\t\tdummy_obs = jax.tree_util.tree_map(lambda x: x[0], obs) # for one agent only\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tif pop.agent.is_recurrent:\n\t\t\tcarry = pop.init_carry(subrng, obs)\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tparams = pop.init_params(subrng, dummy_obs)\n\n\t\tschedule_fn = 
optax.linear_schedule(\n\t\t\tinit_value=-float(lr_init),\n\t\t\tend_value=-float(lr_final),\n\t\t\ttransition_steps=lr_anneal_steps,\n\t\t)\n\n\t\ttx = optax.chain(\n\t\t\toptax.clip_by_global_norm(self.max_grad_norm),\n\t\t\toptax.scale_by_adam(eps=self.adam_eps),\n\t\t\toptax.scale_by_schedule(schedule_fn),\n\t\t)\n\n\t\ttrain_state = VmapTrainState.create(\n\t\t\tapply_fn=pop.agent.evaluate,\n\t\t\tparams=params,\n\t\t\ttx=tx\n\t\t)\n\t\t\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(pop.n_agents,n_parallel_ep))\n\n\t\treturn train_state, state, obs, carry, ep_stats\n\n\tdef get_checkpoint_state(self, state):\n\t\t_state = list(state)\n\t\t_state[1] = state[1].state_dict\n\t\t_state[6] = state[6].state_dict\n\n\t\treturn _state\n\n\tdef load_checkpoint_state(self, runner_state, state):\n\t\trunner_state = list(runner_state)\n\t\trunner_state[1] = runner_state[1].load_state_dict(state[1])\n\t\trunner_state[6] = runner_state[6].load_state_dict(state[6])\n\n\t\treturn tuple(runner_state)\n\n\t@partial(jax.jit, static_argnums=(0,2,3))\n\tdef _get_transition(\n\t\tself,\n\t\trng, \n\t\tpop, \n\t\trollout_mgr, \n\t\trollout, \n\t\tparams, \n\t\tstate, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\treset_state=None,\n\t\textra=None):\n\t\t# Sample action\n\t\tvalue, pi_params, next_carry = pop.act(params, obs, carry, done)\n\t\tpi = pop.get_action_dist(pi_params)\n\t\trng, subrng = jax.random.split(rng)\n\t\taction = pi.sample(seed=subrng)\n\t\tlog_pi = pi.log_prob(action)\n\n\t\trng, *vrngs = jax.random.split(rng, pop.n_agents+1)\n\n\t\tif pop is self.student_pop:\n\t\t\tstep_fn = self.benv.step_student\n\t\telse:\n\t\t\tstep_fn = self.benv.step_teacher\n\t\tstep_args = (jnp.array(vrngs), state, action)\n\n\t\tif reset_state is not None: # Needed for student to reset to same instance\n\t\t\tstep_args += (reset_state,)\n\n\t\tif extra is not None:\n\t\t\tstep_args += (extra,)\n\t\t\tnext_obs, next_state, reward, done, info, extra = step_fn(*step_args)\n\t\telse:\n\t\t\tnext_obs, next_state, reward, done, info = step_fn(*step_args)\n\n\t\t# Add transition to storage\n\t\tstep = (obs, action, reward, done, log_pi, value)\n\t\tif carry is not None:\n\t\t\tstep += (carry,)\n\n\t\trollout = rollout_mgr.append(rollout, *step)\n\n\t\tif self.render and pop is self.student_pop:\n\t\t\tself.viz.render(\n\t\t\t\tself.benv.env.env.params, \n\t\t\t\tjax.tree_util.tree_map(lambda x: x[0][0], state))\n\n\t\treturn rollout, next_state, next_obs, next_carry, done, info, extra\n\n\t@partial(jax.jit, static_argnums=(0,2,3,4))\n\tdef _rollout(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\trollout_mgr,\n\t\tn_steps,\n\t\tparams, \n\t\tstate, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\treset_state=None, \n\t\textra=None, \n\t\tep_stats=None):\n\t\trngs = jax.random.split(rng, n_steps)\n\n\t\trollout = rollout_mgr.reset()\n\n\t\tdef _scan_rollout(scan_carry, rng):\n\t\t\t(rollout, \n\t\t\t state, \n\t\t\t obs, \n\t\t\t carry,\n\t\t\t done, \n\t\t\t extra, \n\t\t\t ep_stats) = scan_carry\n\t\t\t\n\t\t\tnext_scan_carry = \\\n\t\t\t\tself._get_transition(\n\t\t\t\t\trng,\n\t\t\t\t\tpop, \n\t\t\t\t\trollout_mgr,\n\t\t\t\t\trollout,\n\t\t\t\t\tparams, \n\t\t\t\t\tstate, \n\t\t\t\t\tobs, \n\t\t\t\t\tcarry, \n\t\t\t\t\tdone,\n\t\t\t\t\treset_state, \n\t\t\t\t\textra)\n\n\t\t\t(rollout, \n\t\t\t next_state, \n\t\t\t next_obs, \n\t\t\t next_carry, \n\t\t\t done, \n\t\t\t info, \n\t\t\t extra) = next_scan_carry\n\n\t\t\tif ep_stats is not None:\n\t\t\t\t_ep_stats_update_fn = self._update_ep_stats \\\n\t\t\t\t\tif pop 
is self.student_pop else self._update_ued_ep_stats\n\n\t\t\t\tep_stats = _ep_stats_update_fn(ep_stats, done, info)\n\n\t\t\treturn (rollout, next_state, next_obs, next_carry, done, extra, ep_stats), None\n\n\t\t(rollout, state, obs, carry, done, extra, ep_stats), _ = jax.lax.scan(\n\t\t\t_scan_rollout,\n\t\t\t(rollout, state, obs, carry, done, extra, ep_stats),\n\t\t\trngs,\n\t\t\tlength=n_steps,\n\t\t\tunroll=self.n_unroll_rollout\n\t\t)\n\n\t\treturn rollout, state, obs, carry, extra, ep_stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, \n\t\tupdate_stats, ep_stats, \n\t\tued_update_stats, ued_ep_stats,\n\t\tenv_metrics=None,\n\t\tgrad_stats=None, ued_grad_stats=None):\n\t\tmean_returns_by_student = jax.vmap(lambda x: x.mean())(ep_stats['return'])\n\t\tmean_returns_by_teacher = jax.vmap(lambda x: x.mean())(ued_ep_stats['return'])\n\n\t\tmean_ep_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ep_stats[k] for k in self.rolling_stats.names}\n\t\t)\n\t\tued_mean_ep_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ued_ep_stats[k] for k in self.ued_rolling_stats.names}\n\t\t)\n\n\t\tstudent_stats = {\n\t\t\tf'mean_{k}':v for k,v in mean_ep_stats.items()\n\t\t}\n\t\tstudent_stats.update(update_stats)\n\n\t\tstats = {}\n\t\tfor i in range(self.n_students):\n\t\t\t_student_stats = jax.tree_util.tree_map(lambda x: x[i], student_stats) # for agent0\n\t\t\tstats.update({f'{k}_a{i}':v for k,v in _student_stats.items()})\n\n\t\tteacher_stats = {\n\t\t\tf'mean_{k}_tch':v for k,v in ued_mean_ep_stats.items()\n\t\t}\n\t\tteacher_stats.update({\n\t\t\tf'{k}_tch':v[0] for k,v in ued_update_stats.items()\n\t\t})\n\t\tstats.update(teacher_stats)\n\n\t\tif self.track_env_metrics:\n\t\t\tpassable_mask = env_metrics.pop('passable')\n\t\t\tmean_env_metrics = jax.tree_util.tree_map(\n\t\t\t\tlambda x: (x*passable_mask).sum()/passable_mask.sum(), \n\t\t\t\tenv_metrics\n\t\t\t)\n\t\t\tmean_env_metrics.update({'passable_ratio': passable_mask.mean()})\n\t\t\tstats.update({\n\t\t\t\tf'env/{k}':v for k,v in mean_env_metrics.items()\n\t\t\t})\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\tdef get_shmap_spec(self):\n\t\trunner_state_size = len(inspect.signature(self.run).parameters)\n\t\tin_spec = [P(None,'device'),]*(runner_state_size)\n\t\tout_spec = [P(None,'device'),]*(runner_state_size)\n\n\t\tin_spec[:2] = [P(None),]*2\n\t\tin_spec[6] = P(None)\n\t\tin_spec = tuple(in_spec)\n\t\tout_spec = (P(None),) + in_spec\n\n\t\treturn in_spec, out_spec\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate,\n\t\tobs,\n\t\tcarry,\n\t\tep_stats,\n\t\tued_train_state,\n\t\tued_state,\n\t\tued_obs,\n\t\tued_carry,\n\t\tued_ep_stats):\n\t\t\"\"\"\n\t\tPerform one update step: rollout teacher + students\n\t\t\"\"\"\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\n\t\t# === Reset teacher env + rollout teacher\n\t\trng, *vrngs = jax.random.split(rng, self.teacher_pop.n_agents+1)\n\t\tued_reset_out = self.benv.reset_teacher(jnp.array(vrngs))\n\t\tif len(ued_reset_out) > 2:\n\t\t\tued_obs, ued_state, ued_extra = ued_reset_out\n\t\telse:\n\t\t\tued_obs, ued_state = ued_reset_out\n\t\t\tued_extra = None\n\n\t\t# Reset UED ep_stats\n\t\tif self.ued_rolling_stats is not None:\n\t\t\tued_ep_stats = 
self.ued_rolling_stats.reset_stats(\n\t\t\t\tbatch_shape=(1,self.n_parallel))\n\t\telse:\n\t\t\tued_ep_stats = None\n\n\t\ttch_rollout_batch_shape = (1,self.n_parallel*self.n_eval)\n\t\tdone = jnp.zeros(tch_rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\tued_rollout, ued_state, ued_obs, ued_carry, _, ued_ep_stats = \\\n\t\t\tself._rollout(\n\t\t\t\tsubrng,\n\t\t\t\tself.teacher_pop,\n\t\t\t\tself.teacher_rollout,\n\t\t\t\tself.teacher_n_rollout_steps,\n\t\t\t\tjax.lax.stop_gradient(ued_train_state.params), \n\t\t\t\tued_state, \n\t\t\t\tued_obs, \n\t\t\t\tued_carry,\n\t\t\t\tdone, \n\t\t\t\textra=ued_extra, \n\t\t\t\tep_stats=ued_ep_stats\n\t\t\t)\n\n\t\t# === Reset student to new envs + rollout students\n\t\trng, *vrngs = jax.random.split(rng, self.teacher_pop.n_agents+1)\n\t\tobs, state, extra = jax.tree_util.tree_map(\n\t\t\tlambda x:x.squeeze(0), self.benv.reset_student(\n\t\t\t\tjnp.array(vrngs),\n\t\t\t\tued_state, \n\t\t\t\tself.student_pop.n_agents))\n\t\treset_state = state\n\n\t\t# Reset student ep_stats\n\t\tst_rollout_batch_shape = (self.n_students,self.n_parallel*self.n_eval)\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=st_rollout_batch_shape)\n\n\t\tdone = jnp.zeros(st_rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\trollout, state, obs, carry, extra, ep_stats = \\\n\t\t\tself._rollout(\n\t\t\t\tsubrng, \n\t\t\t\tself.student_pop,\n\t\t\t\tself.student_rollout,\n\t\t\t\tself.n_rollout_steps,\n\t\t\t\tjax.lax.stop_gradient(train_state.params),\n\t\t\t\tstate, \n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\treset_state=reset_state, \n\t\t\t\textra=extra, \n\t\t\t\tep_stats=ep_stats)\n\n\t\t# === Update student with PPO\n\t\t# PPOAgent vmaps over the train state and batch. 
Batch must be N x EM\n\t\tstudent_rollout_last_value = self.student_pop.get_value(\n\t\t\tjax.lax.stop_gradient(train_state.params), obs, carry\n\t\t)\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tstudent_rollout_last_value\n\t\t)\n\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self.student_pop.update(subrng, train_state, train_batch)\n\n\t\t# === Update teacher with PPO\n\t\t# - Compute returns per env per agent\n\t\t# - Compute batched returns based on returns per env per agent\n\t\tued_score, _ = compute_ued_scores(self.ued_score, train_batch, self.n_eval)\n\t\tued_rollout = self.teacher_rollout.set_final_reward(ued_rollout, ued_score)\n\t\tued_train_batch = self.teacher_rollout.get_batch(\n\t\t\tued_rollout, \n\t\t\tjnp.zeros((1, self.n_parallel)) # Last step terminates episode\n\t\t)\n\n\t\tued_ep_stats = self._update_ued_ep_stats(\n\t\t\tued_ep_stats, \n\t\t\tjnp.ones((1,len(ued_score),1), dtype=jnp.bool_),\n\t\t\t{'return': jnp.expand_dims(ued_score, (0,-1))}\n\t\t)\n\n\t\t# Update teacher, batch must be 1 x Ex1\n\t\trng, subrng = jax.random.split(rng)\n\t\tued_train_state, ued_update_stats = self.teacher_pop.update(subrng, ued_train_state, ued_train_batch)\n\n\t\t# --------------------------------------------------\n\t\t# Collect metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(reset_state)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tgrad_stats, ued_grad_stats = None, None\n\n\t\tstats = self._compile_stats(\n\t\t\tupdate_stats, ep_stats, \n\t\t\tued_update_stats, ued_ep_stats,\n\t\t\tenv_metrics,\n\t\t\tgrad_stats, ued_grad_stats)\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\ttrain_state = train_state.increment()\n\t\tued_train_state = ued_train_state.increment()\n\t\tself.n_updates += 1\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng,\n\t\t\ttrain_state, state, obs, carry, ep_stats,\n\t\t\tued_train_state, ued_state, ued_obs, ued_carry, ued_ep_stats\n\t\t)" }, { "identifier": "PLRRunner", "path": "src/minimax/runners/plr_runner.py", "snippet": "class PLRRunner(DRRunner):\n\tdef __init__(\n\t\tself, \n\t\t*,\n\t\treplay_prob=0.5,\n\t\tbuffer_size=100,\n\t\tstaleness_coef=0.3,\n\t\tuse_score_ranks=True,\n\t\ttemp=1.0,\n\t\tmin_fill_ratio=0.5,\n\t\tuse_robust_plr=False,\n\t\tuse_parallel_eval=False,\n\t\tued_score='l1_value_loss',\n\t\tforce_unique=False, # Slower if True\n\t\tmutation_fn=None,\n\t\tn_mutations=0,\n\t\tmutation_criterion='batch',\n\t\tmutation_subsample_size=1,\n\t\t**kwargs):\n\t\tuse_mutations = mutation_fn is not None\n\t\tif use_parallel_eval:\n\t\t\treplay_prob = 1.0 # Replay every rollout cycle\n\t\t\tmutation_criterion = 'batch' # Force batch mutations (no UED scores)\n\t\t\tself._n_parallel_batches = 3 if use_mutations else 2\n\t\t\tkwargs['n_parallel'] *= self._n_parallel_batches\n\n\t\tsuper().__init__(**kwargs)\n\n\t\tself.replay_prob = replay_prob\n\t\tself.buffer_size = buffer_size\n\t\tself.staleness_coef = staleness_coef\n\t\tself.temp = temp\n\t\tself.use_score_ranks = use_score_ranks\n\t\tself.min_fill_ratio = min_fill_ratio\n\t\tself.use_robust_plr = use_robust_plr\n\t\tself.use_parallel_eval = use_parallel_eval\n\t\tself.ued_score = UEDScore[ued_score.upper()]\n\n\t\tself.use_mutations = use_mutations\n\t\tif self.use_mutations:\n\t\t\tself.mutation_fn = envs.get_mutator(self.benv.env_name, mutation_fn)\n\t\telse:\n\t\t\tself.mutation_fn = None\n\t\tself.n_mutations = n_mutations\n\t\tself.mutation_criterion = 
MutationCriterion[mutation_criterion.upper()]\n\t\tself.mutation_subsample_size = mutation_subsample_size\n\n\t\tself.force_unique = force_unique\n\t\tif force_unique:\n\t\t\tself.comparator_fn = envs.get_comparator(self.benv.env_name)\n\t\telse:\n\t\t\tself.comparator_fn = None\n\n\t\tif mutation_fn is not None and mutation_criterion != 'batch':\n\t\t\tassert self.n_parallel % self.mutation_subsample_size == 0, \\\n\t\t\t\t'Number of parallel envs must be divisible by mutation subsample size.'\n\n\tdef reset(self, rng):\n\t\trunner_state = list(super().reset(rng))\n\t\trng = runner_state[0]\n\t\trunner_state[0], subrng = jax.random.split(rng)\n\t\texample_state = self.benv.env.reset(rng)[1]\n\n\t\tself.plr_mgr = PopPLRManager(\n\t\t\tn_agents=self.n_students,\n\t\t\texample_level=example_state,\n\t\t\tued_score=self.ued_score,\n\t\t\treplay_prob=self.replay_prob,\n\t\t\tbuffer_size=self.buffer_size,\n\t\t\tstaleness_coef=self.staleness_coef,\n\t\t\ttemp=self.temp,\n\t\t\tuse_score_ranks=self.use_score_ranks,\n\t\t\tmin_fill_ratio=self.min_fill_ratio,\n\t\t\tuse_robust_plr=self.use_robust_plr,\n\t\t\tuse_parallel_eval=self.use_parallel_eval,\n\t\t\tcomparator_fn=self.comparator_fn,\n\t\t\tn_devices=self.n_devices\n\t\t)\n\t\tplr_buffer = self.plr_mgr.reset(self.n_students)\n\n\t\ttrain_state = runner_state[1]\n\t\ttrain_state = train_state.replace(plr_buffer=plr_buffer)\n\t\tif self.n_devices == 1:\n\t\t\trunner_state[1] = train_state\n\t\telse:\n\t\t\tplr_buffer = jax.tree_map(lambda x: x.repeat(self.n_devices, 1), plr_buffer) # replicate plr buffer\n\t\t\trunner_state += (plr_buffer,) # Return PLR buffer directly to make shmap easier\n\n\t\tself.dummy_eval_output = self._create_dummy_eval_output(train_state)\n\n\t\treturn tuple(runner_state)\n\n\tdef _create_dummy_eval_output(self, train_state):\n\t\trng, *vrngs = jax.random.split(jax.random.PRNGKey(0), self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs))\n\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(self.n_students, self.n_parallel*self.n_eval))\n\n\t\tued_scores = jnp.zeros((self.n_students, self.n_parallel))\n\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.zero_carry\n\t\telse:\n\t\t\tcarry = None\n\t\trollout = self.student_rollout.reset()\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry,\n\t\t\t)\n\t\t)\n\n\t\treturn (\n\t\t\trng,\n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstate,\n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\tstate,\n\t\t\ttrain_batch,\n\t\t\tued_scores\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,8))\n\tdef _eval_and_update_plr(\n\t\t\tself,\n\t\t\trng,\n\t\t\tlevels,\n\t\t\tlevel_idxs, \n\t\t\ttrain_state, \n\t\t\tupdate_plr,\n\t\t\tparent_idxs=None,\n\t\t\tdupe_mask=None,\n\t\t\tfake=False):\n\t\t# Collect rollout and optionally update plr buffer\n\t\t# Returns train_batch and ued_scores\n\t\t# Perform rollout: @todo: pmap this\n\t\tif fake:\n\t\t\tdummy_eval_output = list(self.dummy_eval_output)\n\t\t\tdummy_eval_output[1] = train_state\n\t\t\treturn tuple(dummy_eval_output)\n\n\t\trollout_batch_shape = (self.n_students, self.n_parallel*self.n_eval)\n\t\tobs, state, extra = self.benv.set_state(levels)\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=rollout_batch_shape)\n\n\t\trollout_start_state = state\n\n\t\tdone = jnp.zeros(rollout_batch_shape, 
dtype=jnp.bool_)\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.zero_carry\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tstart_state = state\n\t\trollout, state, start_state, obs, carry, extra, ep_stats, train_state = \\\n\t\t\tself._rollout_students(\n\t\t\t\tsubrng, \n\t\t\t\ttrain_state, \n\t\t\t\tstate, \n\t\t\t\tstart_state,\n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\textra, \n\t\t\t\tep_stats\n\t\t\t)\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry\n\t\t\t)\n\t\t)\n\n\t\t# Update PLR buffer\n\t\tif self.ued_score == UEDScore.MAX_MC:\n\t\t\tmax_returns = jax.vmap(lambda x,y: x.at[y].get())(train_state.plr_buffer.max_returns, level_idxs)\n\t\t\tmax_returns = jnp.where(\n\t\t\t\tjnp.greater_equal(level_idxs, 0),\n\t\t\t\tmax_returns,\n\t\t\t\tjnp.full_like(max_returns, -jnp.inf)\n\t\t\t)\n\t\t\tued_info = {'max_returns': max_returns}\n\t\telse:\n\t\t\tued_info = None\n\t\tued_scores, ued_score_info = compute_ued_scores(\n\t\t\tself.ued_score, train_batch, self.n_eval, info=ued_info, ignore_val=-jnp.inf, per_agent=True)\n\t\tnext_plr_buffer = self.plr_mgr.update(\n\t\t\ttrain_state.plr_buffer, \n\t\t\tlevels=levels, \n\t\t\tlevel_idxs=level_idxs, \n\t\t\tued_scores=ued_scores,\n\t\t\tdupe_mask=dupe_mask, \n\t\t\tinfo=ued_score_info, \n\t\t\tignore_val=-jnp.inf,\n\t\t\tparent_idxs=parent_idxs)\n\n\t\tnext_plr_buffer = jax.vmap(\n\t\t\tlambda update, new, prev: jax.tree_map(\n\t\t\t\tlambda x, y: jax.lax.select(update, x, y), new, prev)\n\t\t)(update_plr, next_plr_buffer, train_state.plr_buffer)\n\n\t\ttrain_state = train_state.replace(plr_buffer=next_plr_buffer)\n\n\t\treturn (\n\t\t\trng,\n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\trollout_start_state,\n\t\t\ttrain_batch,\n\t\t\tued_scores,\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _mutate_levels(self, rng, levels, level_idxs, ued_scores=None):\n\t\tif not self.use_mutations:\n\t\t\treturn levels, level_idxs, jnp.full_like(level_idxs, -1)\n\n\t\tdef upsample_levels(levels, level_idxs, subsample_idxs):\n\t\t\tsubsample_idxs = subsample_idxs.repeat(self.n_parallel//self.mutation_subsample_size, -1)\n\t\t\tparent_idxs = level_idxs.take(subsample_idxs)\n\t\t\tlevels = jax.vmap(\n\t\t\t\tlambda x, y: jax.tree_map(lambda _x: jnp.array(_x).take(y, 0), x)\n\t\t\t)(levels, parent_idxs)\n\t\t\t\n\t\t\treturn levels, parent_idxs\n\n\t\tif self.mutation_criterion == MutationCriterion.BATCH:\n\t\t\tparent_idxs = level_idxs\n\n\t\tif self.mutation_criterion == MutationCriterion.EASY:\n\t\t\t_, top_level_idxs = jax.lax.approx_min_k(ued_scores, self.mutation_subsample_size)\n\t\t\tlevels, parent_idxs = upsample_levels(levels, level_idxs, top_level_idxs)\n\n\t\telif self.mutation_criterion == MutationCriterion.HARD:\n\t\t\t_, top_level_idxs = jax.lax.approx_max_k(ued_scores, self.mutation_subsample_size)\n\t\t\tlevels, parent_idxs = upsample_levels(levels, level_idxs, top_level_idxs)\n\n\t\tn_parallel = level_idxs.shape[-1]\n\t\tvrngs = jax.vmap(lambda subrng: jax.random.split(subrng, n_parallel))(\n\t\t\tjax.random.split(rng, self.n_students)\n\t\t)\n\n\t\tmutated_levels = jax.vmap(\n\t\t\tlambda *args: jax.vmap(self.mutation_fn, in_axes=(0,None,0,None))(*args),\n\t\t\tin_axes=(0,None,0,None)\n\t\t)(vrngs, self.benv.env_params, levels, 
self.n_mutations)\n\n\t\t# Mutated levels do not have existing idxs in the PLR buffer.\n\t\tmutated_level_idxs = jnp.full((self.n_students, n_parallel), -1)\n\n\t\treturn mutated_levels, mutated_level_idxs, parent_idxs\n\n\tdef _efficient_grad_update(self, rng, train_state, train_batch, is_replay):\n\t\t# PPOAgent vmaps over the train state and batch. Batch must be N x EM\n\t\tskip_grad_update = jnp.logical_and(self.use_robust_plr, ~is_replay)\n\n\t\tif self.n_students == 1:\n\t\t\ttrain_state, stats = jax.lax.cond(\n\t\t\t\tskip_grad_update[0],\n\t\t\t\tpartial(self.student_pop.update, fake=True),\n\t\t\t\tself.student_pop.update,\n\t\t\t\t*(rng, train_state, train_batch)\n\t\t\t)\n\t\telif self.n_students > 1: # Have to vmap all students + take only students that need updates\n\t\t\t_, dummy_stats = jax.vmap(lambda *_: self.student_pop.agent.get_empty_update_stats())(np.arange(self.n_students))\n\t\t\t_train_state, stats = self.student.update(rng, train_state, train_batch)\n\t\t\ttrain_state, stats = jax.vmap(lambda cond,x,y: \\\n\t\t\t\t\tjax.tree_map(lambda _cond,_x,_y: jax.lax.select(_cond,_x,_y), cond, x, y))(\n\t\t\t\t\t\tis_replay, (train_state, stats), (_train_state, dummy_stats)\n\t\t\t\t\t)\n\n\t\treturn train_state, stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, update_stats, ep_stats, env_metrics=None, plr_stats=None):\n\t\tstats = super()._compile_stats(update_stats, ep_stats, env_metrics)\n\n\t\tif plr_stats is not None:\n\t\t\tplr_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(plr_stats)\n\n\t\tif self.n_students > 1:\n\t\t\t_plr_stats = {}\n\t\t\tfor i in range(self.n_students):\n\t\t\t\t_student_plr_stats = jax.tree_util.tree_map(lambda x: x[i], plr_stats) # for agent0\n\t\t\t\t_plr_stats.update({f'{k}_a{i}':v for k,v in _student_plr_stats.items()})\n\t\t\tplr_stats = _plr_stats\n\t\telse:\n\t\t\tplr_stats = jax.tree_map(lambda x: x[0], plr_stats) \n\n\t\tstats.update({f'plr_{k}':v for k,v in plr_stats.items()})\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state,\n\t\tobs, \n\t\tcarry=None, \n\t\textra=None, \n\t\tep_stats=None,\n\t\tplr_buffer=None):\n\t\t# If device sharded, load sharded PLR buffer into train state\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\t\t\ttrain_state = train_state.replace(plr_buffer=plr_buffer)\n\n\t\t# Sample next training levels via PLR\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs), self.n_parallel, 1)\n\n\t\tif self.use_parallel_eval:\n\t\t\tn_level_samples = self.n_parallel//self._n_parallel_batches\n\t\t\tnew_levels = jax.tree_map(lambda x: x.at[:,n_level_samples:2*n_level_samples].get(), state)\n\t\telse:\n\t\t\tn_level_samples = self.n_parallel\n\t\t\tnew_levels = state\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tlevels, level_idxs, is_replay, next_plr_buffer = \\\n\t\t\tself.plr_mgr.sample(subrng, train_state.plr_buffer, new_levels, n_level_samples)\n\t\ttrain_state = train_state.replace(plr_buffer=next_plr_buffer)\n\n\t\t# If use_parallel_eval=True, need to combine replay and non-replay levels together\n\t\t# Need to mutate levels as well\n\t\tparent_idxs = jnp.full((self.n_students, self.n_parallel), -1)\n\t\tif self.use_parallel_eval: # Parallel 
ACCEL\n\t\t\tnew_level_idxs = jnp.full_like(parent_idxs, -1)\n\n\t\t\t_all_levels = jax.vmap(\n\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=n_level_samples, src_len=n_level_samples),\n\t\t\t\t)(state, levels)\n\t\t\t_all_level_idxs = jax.vmap(\n\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=n_level_samples, src_len=n_level_samples),\n\t\t\t\t)(new_level_idxs, level_idxs)\n\n\t\t\tif self.use_mutations:\n\t\t\t\trng, subrng = jax.random.split(rng)\n\t\t\t\tmutated_levels, mutated_level_idxs, _parent_idxs = self._mutate_levels(subrng, levels, level_idxs)\n\t\t\t\t\n\t\t\t\tfallback_levels = jax.tree_map(lambda x: x.at[:,-n_level_samples:].get(), state)\n\t\t\t\tfallback_level_idxs = jnp.full_like(mutated_level_idxs, -1)\n\n\t\t\t\tmutated_levels = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, mutated_levels, fallback_levels)\n\n\t\t\t\tmutated_level_idxs = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, mutated_level_idxs, fallback_level_idxs)\n\n\t\t\t\t_parent_idxs = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, _parent_idxs, fallback_level_idxs)\n\t\t\n\t\t\t\tmutated_levels_start_idx = 2*n_level_samples\n\t\t\t\t_all_levels = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(_all_levels, mutated_levels)\n\t\t\t\t_all_level_idxs = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(_all_level_idxs, mutated_level_idxs)\n\t\t\t\tparent_idxs = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(parent_idxs, _parent_idxs)\n\n\t\t\tlevels = _all_levels\n\t\t\tlevel_idxs = _all_level_idxs\n\n\t\t# dedupe levels, move into PLR buffer logic\n\t\tif self.force_unique:\n\t\t\tlevel_idxs, dupe_mask = self.plr_mgr.dedupe_levels(next_plr_buffer, levels, level_idxs)\n\t\telse:\n\t\t\tdupe_mask = None \n\n\t\t# Evaluate levels + update PLR\n\t\tresult = self._eval_and_update_plr(\n\t\t\trng, levels, level_idxs, train_state, update_plr=jnp.array([True]*self.n_students), parent_idxs=parent_idxs, dupe_mask=dupe_mask)\n\t\trng, train_state, state, start_state, obs, carry, extra, ep_stats, \\\n\t\t\trollout_start_state, train_batch, ued_scores = result\n\n\t\tif self.use_parallel_eval:\n\t\t\treplay_start_idx = self.n_eval*n_level_samples\n\t\t\treplay_end_idx = 2*replay_start_idx\n\t\t\ttrain_batch = jax.vmap(\n\t\t\t\tlambda x: jax.tree_map(\n\t\t\t\t\tlambda _x: _x.at[:,replay_start_idx:replay_end_idx].get(), x)\n\t\t\t\t)(train_batch)\n\n\t\t# Gradient update\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self._efficient_grad_update(subrng, train_state, train_batch, is_replay)\n\n\t\t# Mutation step\n\t\tuse_mutations = jnp.logical_and(self.use_mutations, is_replay)\n\t\tuse_mutations = jnp.logical_and(use_mutations, not self.use_parallel_eval) # Already mutated above in parallel\n\t\trng, arng, brng = jax.random.split(rng, 3)\n\n\t\tmutated_levels, mutated_level_idxs, parent_idxs = jax.lax.cond(\n\t\t\tuse_mutations.any(),\n\t\t\tself._mutate_levels,\n\t\t\tlambda *_: (levels, level_idxs, jnp.full_like(level_idxs, 
-1)),\n\t\t\t*(arng, levels, level_idxs, ued_scores)\n\t\t)\n\n\t\tmutated_dupe_mask = jnp.zeros_like(mutated_level_idxs, dtype=jnp.bool_)\n\t\tif self.force_unique: # Should move into update plr logic\n\t\t\tmutated_level_idxs, mutated_dupe_mask = jax.lax.cond(\n\t\t\t\tuse_mutations.any(),\n\t\t\t\tself.plr_mgr.dedupe_levels,\n\t\t\t\tlambda *_: (mutated_level_idxs, mutated_dupe_mask),\n\t\t\t\t*(next_plr_buffer, mutated_levels, mutated_level_idxs)\n\t\t\t)\n\n\t\tmutation_eval_result = jax.lax.cond(\n\t\t\tuse_mutations.any(),\n\t\t\tself._eval_and_update_plr,\n\t\t\tpartial(self._eval_and_update_plr, fake=True),\n\t\t\t*(brng, mutated_levels, mutated_level_idxs, train_state, use_mutations, parent_idxs, mutated_dupe_mask)\n\t\t)\n\t\ttrain_state = mutation_eval_result[1]\n\n\t\t# Collect training env metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(levels)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tplr_stats = self.plr_mgr.get_metrics(train_state.plr_buffer)\n\n\t\tstats = self._compile_stats(update_stats, ep_stats, env_metrics, plr_stats)\n\n\t\tif self.n_devices > 1:\n\t\t\tplr_buffer = train_state.plr_buffer\n\t\t\ttrain_state = train_state.replace(plr_buffer=None)\n\n\t\ttrain_state = train_state.increment()\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\tplr_buffer\n\t\t)" } ]
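The runner snippets in the context list above (`_rollout` in PAIREDRunner, `_rollout_students` in the DR/PLR family) all collect fixed-length trajectories by threading a per-step transition function through `jax.lax.scan`. The sketch below shows that pattern in isolation; the toy random-walk dynamics and every name in it are illustrative stand-ins, not code from the minimax repository.

import jax
import jax.numpy as jnp

def rollout(rng, init_state, n_steps):
    # One PRNG key per environment step, mirroring how the runners pre-split rngs.
    rngs = jax.random.split(rng, n_steps)

    def _step(carry, step_rng):
        state = carry
        # Toy dynamics: a random "action" nudges the state; reward favors small states.
        action = jax.random.normal(step_rng, state.shape)
        next_state = state + 0.1 * action
        reward = -jnp.sum(next_state ** 2)
        # The carry feeds the next step; the second return value is stacked
        # across steps by scan, playing the role of the rollout buffer.
        return next_state, dict(state=state, action=action, reward=reward)

    final_state, transitions = jax.lax.scan(_step, init_state, rngs, length=n_steps)
    return final_state, transitions  # every leaf gains a leading time axis

final_state, traj = jax.jit(rollout, static_argnums=(2,))(
    jax.random.PRNGKey(0), jnp.zeros(4), 8)
print(traj["reward"].shape)  # (8,)

Keeping the whole loop inside scan is what lets the runners JIT-compile one update step end to end instead of tracing a Python loop.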
import copy import time import numpy as np import jax import minimax.envs as envs import minimax.models as models import minimax.agents as agents from functools import partial from collections import defaultdict from jax.sharding import Mesh, PartitionSpec as P from jax.experimental import mesh_utils from jax.experimental.shard_map import shard_map from .eval_runner import EvalRunner from .dr_runner import DRRunner from .paired_runner import PAIREDRunner from .plr_runner import PLRRunner from minimax.util.rl import UEDScore, PopPLRManager
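The import block above pulls in `Mesh`, `PartitionSpec`, `mesh_utils` and `shard_map`, which the runners pair with the per-argument specs returned by `get_shmap_spec` to split the batch axis across devices. Below is a minimal sketch of that mechanism, under the assumption that the sharded axis divides evenly across the local devices; the toy function is not from the repository.

import jax
import jax.numpy as jnp
from jax.sharding import Mesh, PartitionSpec as P
from jax.experimental import mesh_utils
from jax.experimental.shard_map import shard_map

n_devices = jax.local_device_count()
mesh = Mesh(mesh_utils.create_device_mesh((n_devices,)), ('device',))

def sharded_mean(x):
    # Each shard sees only its slice of axis 1; pmean over the mesh axis
    # averages the per-shard results, mirroring the runners' stat reduction.
    return jax.lax.pmean(x.mean(), 'device')

f = shard_map(sharded_mean, mesh=mesh,
              in_specs=P(None, 'device'),   # shard axis 1, replicate axis 0
              out_specs=P())                # scalar result, replicated

x = jnp.arange(4 * 8, dtype=jnp.float32).reshape(4, 8)
print(f(x))  # equals x.mean()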
16,378
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo(
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo(
runner_cls=PLRRunner,
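The cropped_code, all_code and next_line fields together show a small registry pattern: `RUNNER_INFO` maps an algorithm name to a `RunnerInfo` wrapper holding the runner class and a UED flag. A hypothetical consumer of such a registry could look like the sketch below; the `make_runner` helper, the stand-in runner classes and the `paired` entry's flag are assumptions for illustration, not code from the file.

class RunnerInfo:
    def __init__(self, runner_cls, is_ued=False):
        self.runner_cls = runner_cls
        self.is_ued = is_ued

# Stand-in runner classes so the sketch is self-contained.
class DRRunner: ...
class PLRRunner: ...
class PAIREDRunner: ...

RUNNER_INFO = {
    'dr': RunnerInfo(runner_cls=DRRunner),
    'plr': RunnerInfo(runner_cls=PLRRunner),
    'paired': RunnerInfo(runner_cls=PAIREDRunner, is_ued=True),  # flag assumed
}

def make_runner(name, **runner_kwargs):
    # Look up the registered class by name and instantiate it.
    try:
        info = RUNNER_INFO[name]
    except KeyError:
        raise ValueError(f"Unknown runner '{name}'. Choices: {sorted(RUNNER_INFO)}")
    return info.runner_cls(**runner_kwargs), info.is_ued

runner, is_ued = make_runner('plr')
print(type(runner).__name__, is_ued)  # PLRRunner False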
3
2023-10-28 12:12:01+00:00
24k
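Looking back at the `_reset_pop` snippet in this record's context, the optimizer is assembled as an optax chain whose learning-rate schedule is negated: optax applies updates additively and the gradient of the loss points uphill, so the final per-step scale must carry the minus sign to produce descent. The following is a minimal sketch of that construction with made-up hyperparameter values; the quadratic loss is only for demonstration.

import jax
import jax.numpy as jnp
import optax

max_grad_norm, adam_eps = 0.5, 1e-5
lr_init, lr_final, anneal_steps = 1e-4, 1e-5, 1000  # made-up values

# Negative schedule: the chain ends by multiplying the Adam-rescaled gradient
# by -lr(t), turning the uphill gradient direction into a descent step.
schedule_fn = optax.linear_schedule(
    init_value=-lr_init, end_value=-lr_final, transition_steps=anneal_steps)

tx = optax.chain(
    optax.clip_by_global_norm(max_grad_norm),
    optax.scale_by_adam(eps=adam_eps),
    optax.scale_by_schedule(schedule_fn),
)

params = jnp.array([1.0, -2.0])
opt_state = tx.init(params)
loss_fn = lambda p: jnp.sum(p ** 2)

for _ in range(5):
    grads = jax.grad(loss_fn)(params)
    updates, opt_state = tx.update(grads, opt_state, params)
    params = optax.apply_updates(params, updates)

print(loss_fn(params))  # slightly below the initial 5.0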
nv-tlabs/vid2player3d
uhc/smpllib/smpl_local_robot.py
[ { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_local.py", "snippet": "class Skeleton:\n def __init__(\n self, template_dir=\"/hdd/zen/dev/copycat/Copycat/assets/bigfoot_template_v1.pkl\"\n ):\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.dof_name = [\"x\", \"y\", \"z\"]\n self.root = None\n self.template_geoms = None\n if osp.isfile(template_dir):\n self.template_geoms = joblib.load(template_dir)\n\n def forward_bvh(self, bone):\n if bone.parent:\n # bone.pos = bone.parent.pos + bone.offset\n bone.pos = bone.offset\n else:\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bvh(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n scale,\n jrange,\n exclude_bones=None,\n channels=None,\n spec_channels=None,\n ):\n if channels is None:\n channels = [\"x\", \"y\", \"z\"]\n if exclude_bones is None:\n exclude_bones = {}\n if spec_channels is None:\n spec_channels = dict()\n\n joint_names = list(\n filter(lambda x: all([t not in x for t in exclude_bones]), offsets.keys())\n )\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.channels = channels\n self.name2bone[self.root.name] = self.root\n self.bones.append(self.root)\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n \n bone.channels = (\n spec_channels[joint] if joint in spec_channels.keys() else channels\n )\n bone.dof_index = [dof_ind[x] for x in bone.channels]\n bone.offset = np.array(offsets[joint]) * self.len_scale\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n\n\n self.bones.append(bone)\n self.name2bone[joint] = bone\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n # print(parent_name)\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n\n self.forward_bvh(self.root)\n # import pdb\n # pdb.set_trace()\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.end = bone.pos.copy() + 0.002\n for c_bone, p_bone in parents.items():\n if p_bone == bone.name:\n bone.end += np.array(offsets[c_bone]) * self.len_scale\n break\n else:\n bone.end = sum([bone_c.pos for bone_c in bone.child]) / len(bone.child)\n\n def write_xml(\n self,\n fname,\n template_fname=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/template/humanoid_template_local.xml\",\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n for joint in joints[1:]:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\"njmax\": \"700\", \"nconmax\": \"200\"})\n tree.write(fname, pretty_print=True)\n\n def write_str(\n self,\n template_fname=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/template/humanoid_template_local.xml\",\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n 
parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n for joint in joints[1:]:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"500\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\"njmax\": \"700\", \"nconmax\": \"200\"})\n\n return etree.tostring(tree, pretty_print=False)\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n attr[\"user\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.end + offset))\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n # j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n j_attr[\"limited\"] = \"false\"\n j_attr[\"type\"] = \"free\"\n j_attr[\"armature\"] = \"0\"\n j_attr[\"damping\"] = \"0\"\n # j_attr[\"stiffness\"] = \"500\"\n SubElement(node, \"joint\", j_attr)\n else:\n for i in range(len(bone.dof_index)):\n ind = bone.dof_index[i]\n axis = bone.orient[:, ind]\n j_attr = dict()\n j_attr[\"name\"] = bone.name + \"_\" + self.dof_name[ind]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n j_attr[\"stiffness\"] = \"500\"\n j_attr[\"damping\"] = \"50\"\n j_attr[\"armature\"] = \"0.02\"\n\n if i < len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n\n SubElement(node, \"joint\", j_attr)\n\n # write geometry\n if self.template_geoms is None or len(self.template_geoms[bone.name]) == 0:\n if bone.parent is None:\n g_attr = dict()\n g_attr[\"size\"] = \"0.0300\"\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n else:\n e1 = np.zeros(3)\n e2 = bone.end.copy() + offset\n g_attr = dict()\n g_attr[\"size\"] = \"0.0100\"\n if bone.name.endswith(\"3\"):\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n *(bone.pos + offset)\n )\n else:\n g_attr[\"type\"] = \"capsule\"\n g_attr[\n \"fromto\"\n ] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2])\n )\n\n g_attr[\"contype\"] = \"1\"\n g_attr[\"conaffinity\"] = \"1\"\n\n else:\n g_attr = dict()\n template_attributes = self.template_geoms[bone.name][0]\n g_attr[\"type\"] = template_attributes[\"type\"]\n # g_attr[\"contype\"] = template_attributes[\"contype\"]\n # g_attr[\"conaffinity\"] = template_attributes[\"conaffinity\"]\n g_attr[\"contype\"] = \"1\"\n g_attr[\"conaffinity\"] = \"1\"\n g_attr[\"density\"] = \"500\"\n e1 = np.zeros(3)\n e2 = bone.end.copy() + offset\n # template_attributes[\"start\"]\n if g_attr[\"type\"] == \"capsule\":\n g_attr[\n \"fromto\"\n ] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate(\n [e1, e2]\n )\n )\n g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n elif 
g_attr[\"type\"] == \"box\":\n # g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n # *template_attributes[\"start\"]\n # )\n multiplier = np.linalg.norm(e2 - e1) / 0.0945\n pos = (e1 + e2) / 2\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n pos[1] += 0.05\n \n\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n\n g_attr[\"size\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n *template_attributes[\"size\"] * multiplier\n )\n g_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(\n *template_attributes[\"rot\"]\n )\n elif g_attr[\"type\"] == \"sphere\":\n g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n *np.zeros(3)\n )\n SubElement(node, \"geom\", g_attr)\n\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_mesh_local.py", "snippet": "class Skeleton:\n def __init__(self, geom_dir, rel_geom_dir):\n self.geom_dir = geom_dir\n self.rel_geom_dir = rel_geom_dir\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.root = None\n self.equalities = None\n self.exclude_contacts = None\n self.collision_groups = None\n self.simple_geom = False\n self.buffer_dict = {\"njmax\": \"2500\", \"nconmax\": \"500\"}\n\n def forward_bones(self, bone):\n if bone.parent:\n # bone.pos = bone.parent.pos + bone.offset\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bones(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n axes,\n channels,\n jrange,\n sites,\n scale,\n equalities,\n exclude_contacts=None,\n collision_groups=None,\n conaffinity=None,\n simple_geom=False,\n color_dict=None,\n ):\n if exclude_contacts is None:\n exclude_contacts = []\n if collision_groups is None:\n collision_groups = {}\n self.exclude_contacts = exclude_contacts\n self.collision_groups = {}\n self.conaffinity = {}\n self.color_dict = color_dict\n\n for group, bones in collision_groups.items():\n for bone in bones:\n self.collision_groups[bone] = group\n\n for group, bones in conaffinity.items():\n for bone in bones:\n self.conaffinity[bone] = group\n\n self.simple_geom = simple_geom\n\n joint_names = list(offsets.keys())\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.equalities = equalities\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.orient = axes[joint_names[0]]\n self.root.pos = offsets[joint_names[0]]\n self.root.sites = sites.get(joint_names[0], [])\n self.name2bone[self.root.name] = self.root\n self.bones.append(self.root)\n\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n bone.channels = channels[joint]\n bone.dof_index = [dof_ind[x[0]] for x in bone.channels]\n bone.offset = offsets[joint] * self.len_scale\n bone.orient = axes[joint]\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n bone.sites = sites.get(joint, [])\n self.bones.append(bone)\n self.name2bone[joint] = bone\n\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n\n self.forward_bones(self.root)\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.ends.append(bone.pos.copy())\n else:\n for bone_c in bone.child:\n 
bone.ends.append(bone_c.pos.copy())\n\n def write_str(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n return etree.tostring(tree, pretty_print=True)\n\n def write_xml(\n self,\n fname,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n # create sensors\n # sensor = tree.getroot().find(\"sensor\")\n # for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'frameangvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'xbody', 'objname': bone.name})\n\n tree.write(fname, pretty_print=True)\n\n def construct_tree(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create meshes\n asset = tree.getroot().find(\"asset\")\n for bone in self.bones:\n if os.path.exists(f\"{self.geom_dir}/geom/{bone.name}.stl\"):\n attr = {\n \"file\": f\"{self.rel_geom_dir}/geom/{bone.name}.stl\",\n \"name\": f\"{bone.name}_mesh\"\n }\n SubElement(asset, \"mesh\", attr)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n\n joints = worldbody.findall(\".//joint\")\n for joint in joints:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n\n # create exclude contacts\n c_node = tree.getroot().find(\"contact\")\n for bname1, bname2 in self.exclude_contacts:\n attr = {\"body1\": bname1, \"body2\": bname2}\n SubElement(c_node, \"exclude\", attr)\n # create equalities\n eq_node = tree.getroot().find(\"equality\")\n for eq_joints in self.equalities.values():\n for j1 in range(len(eq_joints) - 1):\n for j2 in range(j1 + 1, len(eq_joints)):\n jname1, jcoeff1 = eq_joints[j1]\n jname2, jcoeff2 = eq_joints[j2]\n coeff = jcoeff1 / jcoeff2\n attr = {\n \"joint1\": jname1,\n \"joint2\": jname2,\n \"polycoef\": f\"0 {coeff:.6f} 0 0 0\",\n }\n SubElement(eq_node, \"joint\", attr)\n return tree\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n quat = quaternion_from_matrix(bone.orient)\n attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*quat)\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n # j_attr[\"limited\"] = \"false\"\n # j_attr[\"type\"] = \"free\"\n # j_attr[\"armature\"] = \"0.02\"\n # j_attr[\"damping\"] = \"50\"\n # j_attr[\"stiffness\"] = \"500\"\n # j_attr[\"frictionloss\"] = \"0\"\n \n SubElement(node, \"freejoint\", j_attr)\n else:\n\n for i in range(len(bone.channels)):\n ind = bone.dof_index[i]\n 
axis = bone.orient[:, ind]\n j_attr = dict()\n \n \n j_attr[\"name\"] = bone.name + \"_\" + bone.channels[i]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos +\n offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n\n\n j_attr[\"stiffness\"] = str(GAINS[bone.name][0])\n j_attr[\"damping\"] = str(GAINS[bone.name][1])\n j_attr[\"armature\"] = \"0.02\"\n \n if i < len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(\n bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n SubElement(node, \"joint\", j_attr)\n\n # write sites\n for s_name, s_pos, s_quat in bone.sites:\n s_attr = {\"name\": s_name}\n s_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(s_pos + offset))\n s_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*s_quat)\n s_attr[\"type\"] = \"sphere\"\n s_attr[\"size\"] = \"0.03\"\n SubElement(node, \"site\", s_attr)\n\n # write geometry\n geom_path = f\"{self.geom_dir}/geom/{bone.name}.stl\"\n \n if not self.simple_geom:\n assert os.path.exists(geom_path)\n if os.path.exists(geom_path):\n g_attr = {\"type\": \"mesh\", \"mesh\": f\"{bone.name}_mesh\"}\n if bone.name in self.collision_groups.keys():\n g_attr[\"density\"] = \"900\"\n # g_attr[\"density\"] = \"400\"\n # g_attr[\"density\"] = \"1000\"\n \n g_attr[\"contype\"] = str(self.collision_groups[bone.name])\n g_attr[\"conaffinity\"] = str(self.conaffinity[bone.name])\n\n # g_attr[\"solimp\"] = \"0.9 0.95 0.001 0.5 2\"\n # g_attr[\"solref\"] = \"0.02 1\"\n # g_attr[\"size\"] = str(10)\n # g_attr[\"friction\"] = \"0.000000000005 0.000000000005 0.1\"\n if not self.color_dict is None:\n g_attr[\"rgba\"] = self.color_dict[bone.name]\n\n # if bone.name in [\"L_Ankle\", \"R_Ankle\", \"L_Toe\", \"R_Toe\"]:\n # g_attr[\"friction\"] = \"5 500 500\"\n # g_attr[\"solimp\"] = \"0.9 0.95 0.001 0.5 2\"\n # g_attr[\"solref\"] = \"0.02 1\"\n # g_attr[\"margin\"] = \"0.0000000000000000001\"\n\n # g_attr[\"solimp\"] = \"0.9 0.99 0.0001 0.5 2\"\n # g_attr[\"solref\"] = \"0.001 0.5\"\n # g_attr[\"condim\"] = \"6\"\n # g_attr[\"friction\"] = \"0 0 0\"\n\n SubElement(node, \"geom\", g_attr)\n else:\n for end in bone.ends:\n g_attr = dict()\n e1 = bone.pos + offset\n e2 = end + offset\n v = e2 - e1\n if np.linalg.norm(v) > 1e-6:\n v /= np.linalg.norm(v)\n e1 += v * 0.02\n e2 -= v * 0.02\n g_attr[\"type\"] = \"capsule\"\n g_attr[\n \"fromto\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2]))\n else:\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*bone.pos)\n g_attr[\"size\"] = \"0.0300\" if self.simple_geom else \"0.0100\"\n if not self.simple_geom:\n g_attr[\"contype\"] = \"0\"\n g_attr[\"conaffinity\"] = \"0\"\n elif bone.name in self.collision_groups.keys():\n group = str(self.collision_groups[bone.name])\n g_attr[\"contype\"] = group\n g_attr[\"conaffinity\"] = group\n SubElement(node, \"geom\", g_attr)\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. 
If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n self.register_buffer('children_map', self._parents_to_children(self.parents))\n\n def _parents_to_children(self, parents):\n self.SPINE3_IDX = 9\n children = torch.ones_like(parents) * -1\n for i in range(24):\n if parents[i] != -1 and children[parents[i]] < 0:\n children[parents[i]] = i\n\n children[self.SPINE3_IDX] = -3\n children[0] = 3\n children[self.SPINE3_IDX] = SMPL_BONE_ORDER_NAMES.index('Neck')\n return children\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None, root_trans=None, root_scale=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 
72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = th_betas[:, :10]\n\n batch_size = pose.shape[0]\n \n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n if root_trans is not None:\n if root_scale is None:\n root_scale = torch.ones_like(root_trans[:, 0])\n cur_root_trans = joints[:, [0], :]\n vertices[:] = (vertices - cur_root_trans) * root_scale[:, None, None] + root_trans[:, None, :]\n joints[:] = (joints - cur_root_trans) * root_scale[:, None, None] + root_trans[:, None, :]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))\n }\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels, self.joint_range\n\n def get_mesh_offsets(self, zero_pose=None, betas=torch.zeros(1, 10), scale=None, flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n if scale is not None:\n verts *= scale\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n if scale is not None:\n joint_pos *= scale\n joint_offsets = {\n joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(betas.shape[0], 1), th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {\n joint_names[c]: (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:, c]\n for c, p in enumerate(smpl_joint_parents)\n }\n 
joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot = False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n 
skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [\n SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES\n ]\n self.parents_to_use = np.concatenate([np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n \n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n\n def get_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n \n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {\n joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n if joint_names[c] in self.joint_names\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else 
None\n for x, i in zip(joint_names, smpl_joint_parents)\n if joint_names[i] in self.joint_names\n }\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "quadric_mesh_decimation", "path": "uhc/utils/geom.py", "snippet": "def quadric_mesh_decimation(fname, reduction_rate, verbose=False):\n reader = vtkSTLReader()\n reader.SetFileName(fname)\n reader.Update()\n inputPoly = reader.GetOutput()\n\n decimate = vtkQuadricDecimation()\n decimate.SetInputData(inputPoly)\n decimate.SetTargetReduction(reduction_rate)\n decimate.Update()\n decimatedPoly = vtkPolyData()\n decimatedPoly.ShallowCopy(decimate.GetOutput())\n\n if verbose:\n print(\n f\"Mesh Decimation: (points, faces) goes from ({inputPoly.GetNumberOfPoints(), inputPoly.GetNumberOfPolys()}) \"\n f\"to ({decimatedPoly.GetNumberOfPoints(), decimatedPoly.GetNumberOfPolys()})\"\n )\n\n stlWriter = vtkSTLWriter()\n stlWriter.SetFileName(fname)\n stlWriter.SetFileTypeToBinary()\n stlWriter.SetInputData(decimatedPoly)\n stlWriter.Write()" }, { "identifier": "flags", "path": "uhc/utils/flags.py", "snippet": "class Flags(object):\n def __init__(self, *items):" } ]
import os import sys import time import argparse import torch import os.path as osp import mujoco_py import numpy as np import math import uuid import atexit import shutil from copy import deepcopy from lxml.etree import XMLParser, parse, Element, SubElement from lxml import etree from io import BytesIO from scipy.spatial import ConvexHull from stl import mesh from mujoco_py import load_model_from_path, MjSim, MjViewer from uhc.khrylib.mocap.skeleton_local import Skeleton from uhc.khrylib.mocap.skeleton_mesh_local import Skeleton as SkeletonMesh from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from uhc.utils.geom import quadric_mesh_decimation from uhc.utils.flags import flags
15,111
# ############################## # joint_range["L_Thorax"][0] = np.array([-np.pi , np.pi ]) # joint_range["L_Thorax"][1] = np.array([-np.pi , np.pi]) # joint_range["L_Thorax"][2] = np.array([-np.pi, np.pi]) # joint_range["R_Thorax"][0] = np.array([-np.pi , np.pi ]) # joint_range["R_Thorax"][1] = np.array([-np.pi, np.pi]) # joint_range["R_Thorax"][2] = np.array([-np.pi, np.pi]) # joint_range["L_Shoulder"][0] = np.array([-np.pi , np.pi ]) # joint_range["L_Shoulder"][1] = np.array([-np.pi , np.pi / 2]) # joint_range["L_Shoulder"][2] = np.array([-np.pi, np.pi]) # joint_range["R_Shoulder"][0] = np.array([-np.pi , np.pi ]) # joint_range["R_Shoulder"][1] = np.array([-np.pi/2, np.pi]) # joint_range["R_Shoulder"][2] = np.array([-np.pi, np.pi]) # ############################## # joint_range["L_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["L_Hip"][2] = np.array([-np.pi / 3, np.pi /2]) # joint_range["R_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["R_Hip"][2] = np.array([-np.pi / 2, np.pi / 3]) # joint_range["L_Knee"][0] = np.array([-np.pi / 16, np.pi]) # joint_range["L_Knee"][1] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["L_Knee"][2] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["R_Knee"][0] = np.array([-np.pi / 16, np.pi]) # joint_range["R_Knee"][1] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["R_Knee"][2] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["L_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["L_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["R_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["R_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) self.height = np.max(verts[:, 1]) - np.min(verts[:, 1]) size_dict = {} if ( len(self.get_params(get_name=True)) > 1 and not params is None ): # ZL: dank code, very dank code self.set_params(params) size_dict = self.get_size() size_dict = self.enforce_length_size(size_dict) # Gear based size # gear_dict = self.get_gear() # for k, v in size_dict.items(): # for idx, suffix in enumerate(["_x", "_y", "_z"]): # if k + suffix in gear_dict: # size_dict[k][idx] *= gear_dict[k + suffix] self.hull_dict = get_joint_geometries( verts, joints, skin_weights, joint_names, scale_dict=size_dict, geom_dir=f"{self.geom_dir}/geom", ) self.skeleton.load_from_offsets( joint_offsets, joint_parents, joint_axes, joint_dofs, joint_range, sites={}, scale=1, equalities={}, exclude_contacts = [ ["Chest", "L_Shoulder"], ["Chest", "R_Shoulder"], ["Chest", "R_Thorax"], ["Chest", "L_Thorax"], ['L_Hip', 'Pelvis'], ['R_Hip', 'Pelvis'], ['Torso', 'Pelvis'], ['L_Knee', 'L_Hip'], ['R_Knee', 'R_Hip'], ['Spine', 'Torso'], ['L_Ankle', 'L_Knee'], ['R_Ankle', 'R_Knee'], ['Chest', 'Spine'], ['L_Toe', 'L_Ankle'], ['R_Toe', 'R_Ankle'], ['Neck', 'Chest'], ['L_Thorax', 'Chest'], ['R_Thorax', 'Chest'], ['Head', 'Neck'], ['L_Shoulder', 'L_Thorax'], ['R_Shoulder', 'R_Thorax'], ['L_Elbow', 
'L_Shoulder'], ['R_Elbow', 'R_Shoulder'], ['L_Wrist', 'L_Elbow'], ['R_Wrist', 'R_Elbow'], ['L_Hand', 'L_Wrist'], ['R_Hand', 'R_Wrist'] ], collision_groups=contype, conaffinity=conaffinity, simple_geom=False, ) else:
sys.path.append(os.getcwd()) def parse_vec(string): return np.fromstring(string, sep=" ") def parse_fromto(string): fromto = np.fromstring(string, sep=" ") return fromto[:3], fromto[3:] def normalize_range(value, lb, ub): return (value - lb) / (ub - lb) * 2 - 1 def denormalize_range(value, lb, ub): return (value + 1) * 0.5 * (ub - lb) + lb def vec_to_polar(v): phi = math.atan2(v[1], v[0]) theta = math.acos(v[2]) return np.array([theta, phi]) def polar_to_vec(p): v = np.zeros(3) v[0] = math.sin(p[0]) * math.cos(p[1]) v[1] = math.sin(p[0]) * math.sin(p[1]) v[2] = math.cos(p[0]) return v def in_hull(hull, queries): tolerance = 1e-3 if len(queries.shape) == 1: queries = queries[ None, ] return np.all( np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1]) <= tolerance, axis=1, ) def get_joint_geometries( smpl_verts, smpl_jts, skin_weights, joint_names, geom_dir, scale_dict={}, suffix = None, verbose=False, min_num_vert = 50, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries os.makedirs(geom_dir, exist_ok=True) for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue vert = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(vert) norm_verts = vert - smpl_jts[jind] norm_hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": norm_hull, "norm_verts": norm_verts, "verts": vert, "hull": hull, } # print(jname, hull.simplices.shape[0]) center = vert[hull.vertices].mean(axis=0) jgeom = mesh.Mesh(np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype)) for i, f in enumerate(hull.simplices): for j in range(3): jgeom.vectors[i][j] = vert[f[j], :] # check if the face's normal is facing outward normal = np.cross( jgeom.vectors[i][1] - jgeom.vectors[i][0], jgeom.vectors[i][2] - jgeom.vectors[i][0], ) out_vec = jgeom.vectors[i].mean(axis=0) - center if np.dot(normal, out_vec) < 0: jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]] # flip the face if suffix is None: fname = f"{geom_dir}/{jname}.stl" else: fname = f"{geom_dir}/{jname}_{suffix}.stl" jgeom.save(fname) # mesh simplification with vtk # min_num_vert = 50 min_num_vert = 50 cur_num_vert = len(hull.vertices) reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert) quadric_mesh_decimation(fname, reduction_rate, verbose=verbose) return hull_dict class Joint: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib["name"] self.type = node.attrib["type"] if "type" in node.attrib else "free" if self.type == "hinge": self.range = np.deg2rad(parse_vec(node.attrib.get("range", "-360 360"))) actu_node = ( body.tree.getroot().find("actuator").find(f'motor[@joint="{self.name}"]') ) if actu_node is not None: self.actuator = Actuator(actu_node, self) else: self.actuator = None self.parse_param_specs() self.param_inited = False # tunable parameters self.pos = parse_vec("0 0 0") if self.type == "hinge": self.axis = vec_to_polar(parse_vec(node.attrib["axis"])) if self.local_coord: self.pos += body.pos self.damping = ( parse_vec(node.attrib["damping"]) if "damping" in node.attrib else np.array([0]) ) self.stiffness = ( parse_vec(node.attrib["stiffness"]) if "stiffness" in node.attrib else np.array([0]) ) self.armature = ( parse_vec(node.attrib["armature"]) if "armature" in node.attrib else np.array([0.01]) ) self.frictionloss = ( parse_vec(node.attrib["frictionloss"]) if "frictionloss" in 
node.attrib else np.array([0]) ) # import ipdb; ipdb.set_trace() # assert np.all(self.pos == body.pos) def __repr__(self): return "joint_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("joint_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self, rename=False, index=0): pos = self.pos - self.body.pos if self.local_coord else self.pos if rename: self.name = self.body.name + "_joint_" + str(index) self.node.attrib["name"] = self.name if self.type == "hinge": axis_vec = polar_to_vec(self.axis) self.node.attrib["axis"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in axis_vec] ) self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos] ) self.node.attrib["damping"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.damping] ) self.node.attrib["stiffness"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.stiffness] ) self.node.attrib["armature"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.armature] ) elif self.type == "free": pass if self.actuator is not None: self.actuator.sync_node() # if self.name != "Pelvis": # self.node.attrib["frictionloss"] = " ".join( # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.frictionloss] # ) # if np.sum([self.name.startswith(i) for i in ["L_Knee", "R_Knee", "L_Ankle", "R_Ankle", "L_Toe", "R_Toe"]]): # self.node.attrib["frictionloss"] = "500" # self.node.attrib["stiffness"] = "5" # self.node.attrib["damping"] = "5" # if self.name != "Pelvis": # self.node.attrib["frictionloss"] = "5000" def get_params(self, param_list, get_name=False, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": if get_name: param_list += ["axis_theta", "axis_phi"] else: axis = normalize_range( self.axis, np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]), ) param_list.append(axis) elif pad_zeros: param_list.append(np.zeros(2)) if self.actuator is not None: self.actuator.get_params(param_list, get_name) elif pad_zeros: param_list.append( np.zeros(3 if self.type == "free" else 1) ) # ZL currently a workaround for supporting 3D joints if "damping" in self.param_specs: if get_name: param_list.append("damping") else: if not self.param_inited and self.param_specs["damping"].get( "rel", False ): self.param_specs["damping"]["lb"] += self.damping self.param_specs["damping"]["ub"] += self.damping self.param_specs["damping"]["lb"] = max( self.param_specs["damping"]["lb"], self.param_specs["damping"].get("min", -np.inf), ) self.param_specs["damping"]["ub"] = min( self.param_specs["damping"]["ub"], self.param_specs["damping"].get("max", np.inf), ) damping = normalize_range( self.damping, self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) param_list.append(damping.flatten()) if "armature" in self.param_specs: if get_name: param_list.append("armature") else: if not self.param_inited and self.param_specs["armature"].get( "rel", False ): self.param_specs["armature"]["lb"] += self.armature self.param_specs["armature"]["ub"] += self.armature self.param_specs["armature"]["lb"] = max( self.param_specs["armature"]["lb"], self.param_specs["armature"].get("min", -np.inf), ) self.param_specs["armature"]["ub"] = min( self.param_specs["armature"]["ub"], self.param_specs["armature"].get("max", np.inf), ) armature = normalize_range( self.armature, 
self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) param_list.append(armature.flatten()) if "stiffness" in self.param_specs: if get_name: param_list.append("stiffness") else: if not self.param_inited and self.param_specs["stiffness"].get( "rel", False ): self.param_specs["stiffness"]["lb"] += self.stiffness self.param_specs["stiffness"]["ub"] += self.stiffness self.param_specs["stiffness"]["lb"] = max( self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"].get("min", -np.inf), ) self.param_specs["stiffness"]["ub"] = min( self.param_specs["stiffness"]["ub"], self.param_specs["stiffness"].get("max", np.inf), ) stiffness = normalize_range( self.stiffness, self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) param_list.append(stiffness.flatten()) if "frictionloss" in self.param_specs: if get_name: param_list.append("frictionloss") else: if not self.param_inited and self.param_specs["frictionloss"].get( "rel", False ): self.param_specs["frictionloss"]["lb"] += self.frictionloss self.param_specs["frictionloss"]["ub"] += self.frictionloss self.param_specs["frictionloss"]["lb"] = max( self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"].get("min", -np.inf), ) self.param_specs["frictionloss"]["ub"] = min( self.param_specs["frictionloss"]["ub"], self.param_specs["frictionloss"].get("max", np.inf), ) frictionloss = normalize_range( self.frictionloss, self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) param_list.append(frictionloss.flatten()) if not get_name: self.param_inited = True # import ipdb; ipdb.set_trace() def set_params(self, params, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": self.axis = denormalize_range( params[:2], np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]) ) params = params[2:] elif pad_zeros: params = params[2:] if self.actuator is not None: params = self.actuator.set_params(params) elif pad_zeros: params = params[1:] # Order of this matters!!! 
Should always be damping, aramature, stiffness (the order they are read) if "damping" in self.param_specs: self.damping = denormalize_range( params[[0]], self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) params = params[1:] if "armature" in self.param_specs: self.armature = denormalize_range( params[[0]], self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) params = params[1:] if "stiffness" in self.param_specs: self.stiffness = denormalize_range( params[[0]], self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) params = params[1:] if "frictionloss" in self.param_specs: self.frictionloss = denormalize_range( params[[0]], self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) params = params[1:] return params class Geom: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib.get("name", "") self.type = node.attrib["type"] self.density = ( parse_vec(node.attrib["density"]) / 1000 if "density" in node.attrib else np.array([1]) ) self.parse_param_specs() self.param_inited = False # tunable parameters # self.size = ( # parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([0]) # ) self.size = ( parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([1, 1, 1]) ) if self.type == "box": self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) self.pos_delta = np.array([0, 0, 0]) self.rot = parse_vec(node.attrib["quat"]) elif self.type == "sphere": self.pos_delta = np.array([0, 0, 0]) self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) elif self.type == "capsule": self.start, self.end = parse_fromto(node.attrib["fromto"]) elif self.type == "mesh": self.start, self.end = body.pos.copy(), body.pos.copy() if self.local_coord: self.start += body.pos self.end += body.pos if body.bone_start is None: self.bone_start = self.start.copy() body.bone_start = self.bone_start.copy() else: self.bone_start = body.bone_start.copy() self.ext_start = np.linalg.norm( self.bone_start - self.start ) ## Geom extension from bone start def __repr__(self): return "geom_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("geom_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) def update_start(self): if self.type == "capsule": vec = self.bone_start - self.end self.start = self.bone_start + vec * (self.ext_start / np.linalg.norm(vec)) def sync_node(self): # self.node.attrib['name'] = self.name self.node.attrib.pop("name", None) if not self.size is None: self.node.attrib["size"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.size] ) self.node.attrib["density"] = " ".join( [f"{x * 1000:.6f}".rstrip("0").rstrip(".") for x in self.density] ) # if self.type == "capsule": # start = self.start - self.body.pos if self.local_coord else self.start # end = self.end - self.body.pos if self.local_coord else self.end # self.node.attrib["fromto"] = " ".join( # [ # f"{x:.6f}".rstrip("0").rstrip(".") # for x in np.concatenate([start, end]) # ] # ) # elif self.type == "box" or self.type == "sphere": # 
# self.node.attrib["pos"] = " ".join( # # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.pos + self.pos_delta] # # ) # import ipdb; ipdb.set_trace() # pass def get_params(self, param_list, get_name=False, pad_zeros=False): if "size" in self.param_specs: if get_name: param_list.append("size") else: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): if not self.param_inited and self.param_specs["size"].get( "rel", False ): self.param_specs["size"]["lb"] += self.size self.param_specs["size"]["ub"] += self.size self.param_specs["size"]["lb"] = max( self.param_specs["size"]["lb"], self.param_specs["size"].get("min", -np.inf), ) self.param_specs["size"]["ub"] = min( self.param_specs["size"]["ub"], self.param_specs["size"].get("max", np.inf), ) size = normalize_range( self.size, self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) param_list.append(size.flatten()) if pad_zeros and self.type == "capsule": param_list.append( np.zeros(2) ) # capsule has needs to be 3 for GNN elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "ext_start" in self.param_specs: if get_name: param_list.append("ext_start") else: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" ): if not self.param_inited and self.param_specs["ext_start"].get( "rel", False ): self.param_specs["ext_start"]["lb"] += self.ext_start self.param_specs["ext_start"]["ub"] += self.ext_start self.param_specs["ext_start"]["lb"] = max( self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"].get("min", -np.inf), ) self.param_specs["ext_start"]["ub"] = min( self.param_specs["ext_start"]["ub"], self.param_specs["ext_start"].get("max", np.inf), ) ext_start = normalize_range( self.ext_start, self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) param_list.append(ext_start.flatten()) elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "density" in self.param_specs: if get_name: param_list.append("density") else: if not self.param_inited and self.param_specs["density"].get( "rel", False ): self.param_specs["density"]["lb"] += self.density self.param_specs["density"]["ub"] += self.density self.param_specs["density"]["lb"] = max( self.param_specs["density"]["lb"], self.param_specs["density"].get("min", -np.inf), ) self.param_specs["density"]["ub"] = min( self.param_specs["density"]["ub"], self.param_specs["density"].get("max", np.inf), ) density = normalize_range( self.density, self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) param_list.append(density.flatten()) # if pad_zeros: # param_list.append(np.zeros(self.density.shape)) if "pos_delta" in self.param_specs: if get_name: param_list.append("pos_delta") else: if self.type == "box" or self.type == "sphere": if not self.param_inited and self.param_specs["pos_delta"].get( "rel", False ): self.param_specs["pos_delta"]["lb"] += self.density self.param_specs["pos_delta"]["ub"] += self.density self.param_specs["pos_delta"]["lb"] = max( self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"].get("min", -np.inf), ) self.param_specs["pos_delta"]["ub"] = min( self.param_specs["pos_delta"]["ub"], self.param_specs["pos_delta"].get("max", np.inf), ) pos_delta = normalize_range( self.pos_delta, self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) param_list.append(pos_delta.flatten()) elif pad_zeros: param_list.append(np.zeros(3)) if not get_name: self.param_inited = True def set_params(self, params, pad_zeros=False): 
if "size" in self.param_specs: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): if len(self.size) == 1: self.size = denormalize_range( params[[0]], self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[1:] elif len(self.size) == 3: self.size = denormalize_range( np.array(params[:3]), self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[3:] elif pad_zeros: params = params[1:] if "ext_start" in self.param_specs: if self.type == "capsule" or self.type == "box" or self.type == "sphere": self.ext_start = denormalize_range( params[[0]], self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "density" in self.param_specs: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): self.density = denormalize_range( params[[0]], self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "pos_delta" in self.param_specs: if self.type == "box" or self.type == "sphere": self.pos_delta = denormalize_range( np.array(params[:3]), self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) params = params[3:] elif pad_zeros: params = params[3:] return params class Actuator: def __init__(self, node, joint): self.node = node self.joint = joint self.cfg = joint.cfg self.joint_name = node.attrib["joint"] self.name = self.joint_name self.parse_param_specs() self.param_inited = False # tunable parameters self.gear = float(node.attrib["gear"]) def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("actuator_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self): self.node.attrib["gear"] = f"{self.gear:.6f}".rstrip("0").rstrip(".") self.name = self.joint.name self.node.attrib["name"] = self.name self.node.attrib["joint"] = self.joint.name def get_params(self, param_list, get_name=False): if "gear" in self.param_specs: if get_name: param_list.append("gear") else: if not self.param_inited and self.param_specs["gear"].get("rel", False): self.param_specs["gear"]["lb"] += self.gear self.param_specs["gear"]["ub"] += self.gear self.param_specs["gear"]["lb"] = max( self.param_specs["gear"]["lb"], self.param_specs["gear"].get("min", -np.inf), ) self.param_specs["gear"]["ub"] = min( self.param_specs["gear"]["ub"], self.param_specs["gear"].get("max", np.inf), ) gear = normalize_range( self.gear, self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) param_list.append(np.array([gear])) if not get_name: self.param_inited = True def set_params(self, params): if "gear" in self.param_specs: self.gear = denormalize_range( params[0].item(), self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) params = params[1:] return params class Body: def __init__(self, node, parent_body, robot, cfg, new_body=False): self.node = node self.parent = parent_body self.new_body = new_body if parent_body is not None: parent_body.child.append(self) parent_body.cind += 1 self.depth = parent_body.depth + 1 else: self.depth = 0 self.robot = robot self.cfg = cfg self.tree = robot.tree self.local_coord = robot.local_coord self.name = ( node.attrib["name"] if "name" in node.attrib else self.parent.name + 
f"_child{len(self.parent.child)}" ) self.child = [] self.cind = 0 self.pos = parse_vec(node.attrib["pos"]) if self.local_coord and parent_body is not None: self.pos += parent_body.pos if cfg.get("init_root_from_geom", False): self.bone_start = None if parent_body is None else self.pos.copy() else: self.bone_start = self.pos.copy() self.joints = [Joint(x, self) for x in node.findall('joint[@type="hinge"]')] + \ [Joint(x, self) for x in node.findall('joint[@type="free"]')] + \ [Joint(x, self) for x in node.findall('freejoint')] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] supported_geoms = self.cfg.get("supported_geoms", ["capsule", "box"]) self.geoms = [ Geom(x, self) for geom_type in supported_geoms for x in node.findall(f'geom[@type="{geom_type}"]') ] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] + [Geom(x, self) for x in node.findall('geom[@type="sphere"]')] + [Geom(x, self) for x in node.findall('geom[@type="box"]')] self.parse_param_specs() self.param_inited = False # parameters self.bone_end = None self.bone_offset = None def __repr__(self): return "body_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("body_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) if name == "bone_ang": specs["lb"] = np.deg2rad(specs["lb"]) specs["ub"] = np.deg2rad(specs["ub"]) def reindex(self): if self.parent is None: self.index = "0" else: ind = self.parent.child.index(self) + 1 pname = "" if self.parent.index == "0" else self.parent.index self.index = str(ind) + pname if self.new_body: self.name = self.index def init(self): if len(self.child) > 0: bone_ends = [x.bone_start for x in self.child] else: bone_ends = [x.end for x in self.geoms] if len(bone_ends) > 0: self.bone_end = np.mean(np.stack(bone_ends), axis=0) self.bone_offset = self.bone_end - self.bone_start def get_actuator_name(self): for joint in self.joints: if joint.actuator is not None: return joint.actuator.name def get_joint_range(self): assert len(self.joints) == 1 return self.joints[0].range def sync_node(self): pos = ( self.pos - self.parent.pos if self.local_coord and self.parent is not None else self.pos ) self.node.attrib["name"] = self.name self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos] ) for idx, joint in enumerate(self.joints): joint.sync_node(rename=self.new_body, index=idx) for geom in self.geoms: geom.sync_node() def sync_geom(self): for geom in self.geoms: geom.bone_start = self.bone_start.copy() # geom.end = self.bone_end.copy() # geom.update_start() def sync_joint(self): if self.parent is not None: for joint in self.joints: joint.pos = self.pos.copy() def rebuild(self): if self.parent is not None: # self.bone_start = self.parent.bone_end.copy() self.pos = self.bone_start.copy() if self.bone_offset is not None: self.bone_end = self.bone_start + self.bone_offset if self.parent is None and self.cfg.get("no_root_offset", False): self.bone_end = self.bone_start self.sync_geom() self.sync_joint() def get_params( self, param_list, get_name=False, pad_zeros=False, demap_params=False ): if self.bone_offset is not None and "offset" in self.param_specs: if get_name: if self.param_specs["offset"]["type"] == "xz": param_list += ["offset_x", "offset_z"] elif self.param_specs["offset"]["type"] == "xy": param_list += 
["offset_x", "offset_y"] else: param_list += ["offset_x", "offset_y", "offset_z"] else: if self.param_specs["offset"]["type"] == "xz": offset = self.bone_offset[[0, 2]] elif self.param_specs["offset"]["type"] == "xy": offset = self.bone_offset[[0, 1]] else: offset = self.bone_offset if not self.param_inited and self.param_specs["offset"].get( "rel", False ): self.param_specs["offset"]["lb"] += offset self.param_specs["offset"]["ub"] += offset self.param_specs["offset"]["lb"] = np.maximum( self.param_specs["offset"]["lb"], self.param_specs["offset"].get( "min", np.full_like(offset, -np.inf) ), ) self.param_specs["offset"]["ub"] = np.minimum( self.param_specs["offset"]["ub"], self.param_specs["offset"].get( "max", np.full_like(offset, np.inf) ), ) offset = normalize_range( offset, self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) param_list.append(offset.flatten()) if self.bone_offset is not None and "bone_len" in self.param_specs: if get_name: param_list += ["bone_len"] else: bone_len = np.linalg.norm(self.bone_offset) if not self.param_inited and self.param_specs["bone_len"].get( "rel", False ): self.param_specs["bone_len"]["lb"] += bone_len self.param_specs["bone_len"]["ub"] += bone_len self.param_specs["bone_len"]["lb"] = max( self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"].get("min", -np.inf), ) self.param_specs["bone_len"]["ub"] = min( self.param_specs["bone_len"]["ub"], self.param_specs["bone_len"].get("max", np.inf), ) bone_len = normalize_range( bone_len, self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) param_list.append(np.array([bone_len])) if self.bone_offset is not None and "bone_ang" in self.param_specs: if get_name: param_list += ["bone_ang"] else: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if not self.param_inited and self.param_specs["bone_ang"].get( "rel", False ): self.param_specs["bone_ang"]["lb"] += bone_ang self.param_specs["bone_ang"]["ub"] += bone_ang self.param_specs["bone_ang"]["lb"] = max( self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"].get("min", -np.inf), ) self.param_specs["bone_ang"]["ub"] = min( self.param_specs["bone_ang"]["ub"], self.param_specs["bone_ang"].get("max", np.inf), ) bone_ang = normalize_range( bone_ang, self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) param_list.append(np.array([bone_ang])) for joint in self.joints: joint.get_params(param_list, get_name, pad_zeros) for geom in self.geoms: geom.get_params(param_list, get_name, pad_zeros) if not get_name: self.param_inited = True if demap_params and not get_name and len(param_list) > 0: params = self.robot.demap_params(np.concatenate(param_list)) return params def set_params(self, params, pad_zeros=False, map_params=False): if map_params: params = self.robot.map_params(params) if self.bone_offset is not None and "offset" in self.param_specs: if self.param_specs["offset"]["type"] in {"xz", "xy"}: offset = denormalize_range( params[:2], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 if self.param_specs["offset"]["type"] == "xz": self.bone_offset[[0, 2]] = offset elif self.param_specs["offset"]["type"] == "xy": self.bone_offset[[0, 1]] = offset params = params[2:] else: offset = denormalize_range( params[:3], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 self.bone_offset[:] = offset params = params[3:] if self.bone_offset is not None and 
"bone_len" in self.param_specs: bone_len = denormalize_range( params[0].item(), self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) bone_len = max(bone_len, 1e-4) params = params[1:] elif self.bone_offset is not None: bone_len = np.linalg.norm(self.bone_offset) if self.bone_offset is not None and "bone_ang" in self.param_specs: bone_ang = denormalize_range( params[0].item(), self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) params = params[1:] elif self.bone_offset is not None: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if "bone_len" in self.param_specs or "bone_ang" in self.param_specs: self.bone_offset = np.array( [bone_len * math.cos(bone_ang), 0, bone_len * math.sin(bone_ang)] ) for joint in self.joints: params = joint.set_params(params, pad_zeros) for geom in self.geoms: params = geom.set_params(params, pad_zeros) # rebuild bone, geom, joint self.rebuild() return params class Robot: def __init__(self, cfg, data_dir="data/smpl", model_xml_path=None, masterfoot=False, create_default_skeleton=False, clean_up=False): self.bodies = [] self.weight = 0 self.height = 0 self.cfg = cfg if model_xml_path is not None: self.set_model_xml_path(model_xml_path) else: self.model_xml_path = None self.param_mapping = cfg.get("param_mapping", "clip") self.smpl_model = cfg.get("model", "smpl") self.mesh = cfg.get("mesh", False) self.gender = cfg.get("gender", "neutral") self.flatfoot = cfg.get("flatfoot", True) self.rel_joint_lm = cfg.get( "rel_joint_lm", True ) # Rolling this out worldwide!! self.masterfoot = masterfoot self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = ( torch.zeros((1, 10)).float() if self.smpl_model == "smpl" else torch.zeros((1, 16)).float() ) if self.smpl_model == "smpl": self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral", create_transl=False) self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male", create_transl=False) self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female", create_transl=False) elif self.smpl_model == "smplh": self.smpl_parser_n = SMPLH_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLH_Parser( model_path=data_dir, gender="male", use_pca=False, create_transl=False ) self.smpl_parser_f = SMPLH_Parser( model_path=data_dir, gender="female", use_pca=False, create_transl=False ) elif self.smpl_model == "smplx": self.smpl_parser_n = SMPLX_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLX_Parser( model_path=data_dir, gender="male", use_pca=False, create_transl=False ) self.smpl_parser_f = SMPLX_Parser( model_path=data_dir, gender="female", use_pca=False, create_transl=False ) if create_default_skeleton: self.load_from_skeleton() if clean_up: atexit.register(self.clean_up) def set_model_xml_path(self, model_xml_path): self.model_xml_path = model_xml_path self.model_dir = osp.dirname(model_xml_path) self.geom_dir = f'{self.model_dir}/mesh/{uuid.uuid4()}' os.makedirs(self.model_dir, exist_ok=True) def clean_up(self): if os.path.exists(self.model_xml_path): os.remove(self.model_xml_path) if osp.isdir(self.geom_dir): shutil.rmtree(self.geom_dir, ignore_errors=True) def get_joint_vertices(self, pose_aa, th_betas=None, th_trans=None, gender=[0]): if gender[0] == 0: smpl_parser = self.smpl_parser_n elif gender[0] == 1: smpl_parser = self.smpl_parser_m elif gender[0] == 2: smpl_parser = self.smpl_parser_f else: 
print(gender) raise Exception("Gender Not Supported!!") vertices, joints = smpl_parser.get_joints_verts( pose=pose_aa, th_betas=th_betas, th_trans=th_trans ) return vertices, joints def load_from_skeleton( self, betas=None, scale=None, v_template=None, gender=[0], objs_info=None, obj_pose=None, params=None, model_xml_path=None, ): if model_xml_path is not None: self.set_model_xml_path(model_xml_path) self.tree = None # xml tree if gender[0] == 0: self.smpl_parser = smpl_parser = self.smpl_parser_n elif gender[0] == 1: self.smpl_parser = smpl_parser = self.smpl_parser_m elif gender[0] == 2: self.smpl_parser = smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") if betas is None and self.beta is None: betas = ( torch.zeros((1, 10)).float() if self.smpl_model == "smpl" else torch.zeros((1, 16)).float() ) else: if len(betas.shape) == 1: betas = betas[None, :] if params is None: self.beta = betas if not betas is None else self.beta else: # If params is not none, we need to set the beta first betas = self.map_params(betas) self.beta = torch.from_numpy( denormalize_range( betas.numpy().squeeze(), self.param_specs["beta"]["lb"], self.param_specs["beta"]["ub"], )[ None, ] ) if flags.debug: print(self.beta) ## Clear up beta for smpl and smplh if self.smpl_model == "smpl" and self.beta.shape[1] == 16: self.beta = self.beta[:, :10] # print(f"Incorrect shape size for {self.model}!!!") elif self.smpl_model == "smplh" and self.beta.shape[1] == 10: self.beta = torch.hstack([self.beta, torch.zeros((1, 6)).float()]) # print(f"Incorrect shape size for {self.model}!!!") if self.mesh: rel_geom_dir = os.path.relpath(self.geom_dir, self.model_dir) self.skeleton = SkeletonMesh(self.geom_dir, rel_geom_dir) zero_pose = torch.zeros((1,72)) ( verts, joints, skin_weights, joint_names, joint_offsets, joint_parents, joint_axes, joint_dofs, joint_range, contype, conaffinity, ) = (smpl_parser.get_mesh_offsets( zero_pose=zero_pose, betas=self.beta, flatfoot=self.flatfoot, scale=scale) if self.smpl_model != "smplx" else smpl_parser.get_mesh_offsets(v_template=v_template)) # if self.rel_joint_lm: # # if False: # joint_range["Head"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["Head"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["Head"][2] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Chest"][0] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Chest"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Chest"][2] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Spine"][0] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Spine"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Spine"][2] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Torso"][0] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["Torso"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["Torso"][2] = np.array([-np.pi / 3, np.pi / 3]) # ############################## # joint_range["L_Thorax"][0] = np.array([-np.pi , np.pi ]) # joint_range["L_Thorax"][1] = np.array([-np.pi , np.pi]) # joint_range["L_Thorax"][2] = np.array([-np.pi, np.pi]) # joint_range["R_Thorax"][0] = np.array([-np.pi , np.pi ]) # joint_range["R_Thorax"][1] = np.array([-np.pi, np.pi]) # joint_range["R_Thorax"][2] = np.array([-np.pi, np.pi]) # joint_range["L_Shoulder"][0] = np.array([-np.pi , np.pi ]) # joint_range["L_Shoulder"][1] = np.array([-np.pi , np.pi / 2]) # joint_range["L_Shoulder"][2] = np.array([-np.pi, np.pi]) # joint_range["R_Shoulder"][0] = np.array([-np.pi , np.pi ]) # joint_range["R_Shoulder"][1] = 
np.array([-np.pi/2, np.pi]) # joint_range["R_Shoulder"][2] = np.array([-np.pi, np.pi]) # ############################## # joint_range["L_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["L_Hip"][2] = np.array([-np.pi / 3, np.pi /2]) # joint_range["R_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) # joint_range["R_Hip"][2] = np.array([-np.pi / 2, np.pi / 3]) # joint_range["L_Knee"][0] = np.array([-np.pi / 16, np.pi]) # joint_range["L_Knee"][1] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["L_Knee"][2] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["R_Knee"][0] = np.array([-np.pi / 16, np.pi]) # joint_range["R_Knee"][1] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["R_Knee"][2] = np.array([-np.pi / 16, np.pi / 16]) # joint_range["L_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["L_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["L_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["R_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) # joint_range["R_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) # joint_range["R_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) self.height = np.max(verts[:, 1]) - np.min(verts[:, 1]) size_dict = {} if ( len(self.get_params(get_name=True)) > 1 and not params is None ): # ZL: dank code, very dank code self.set_params(params) size_dict = self.get_size() size_dict = self.enforce_length_size(size_dict) # Gear based size # gear_dict = self.get_gear() # for k, v in size_dict.items(): # for idx, suffix in enumerate(["_x", "_y", "_z"]): # if k + suffix in gear_dict: # size_dict[k][idx] *= gear_dict[k + suffix] self.hull_dict = get_joint_geometries( verts, joints, skin_weights, joint_names, scale_dict=size_dict, geom_dir=f"{self.geom_dir}/geom", ) self.skeleton.load_from_offsets( joint_offsets, joint_parents, joint_axes, joint_dofs, joint_range, sites={}, scale=1, equalities={}, exclude_contacts = [ ["Chest", "L_Shoulder"], ["Chest", "R_Shoulder"], ["Chest", "R_Thorax"], ["Chest", "L_Thorax"], ['L_Hip', 'Pelvis'], ['R_Hip', 'Pelvis'], ['Torso', 'Pelvis'], ['L_Knee', 'L_Hip'], ['R_Knee', 'R_Hip'], ['Spine', 'Torso'], ['L_Ankle', 'L_Knee'], ['R_Ankle', 'R_Knee'], ['Chest', 'Spine'], ['L_Toe', 'L_Ankle'], ['R_Toe', 'R_Ankle'], ['Neck', 'Chest'], ['L_Thorax', 'Chest'], ['R_Thorax', 'Chest'], ['Head', 'Neck'], ['L_Shoulder', 'L_Thorax'], ['R_Shoulder', 'R_Thorax'], ['L_Elbow', 'L_Shoulder'], ['R_Elbow', 'R_Shoulder'], ['L_Wrist', 'L_Elbow'], ['R_Wrist', 'R_Elbow'], ['L_Hand', 'L_Wrist'], ['R_Hand', 'R_Wrist'] ], collision_groups=contype, conaffinity=conaffinity, simple_geom=False, ) else:
self.skeleton = Skeleton()
1
2023-10-30 20:43:43+00:00
24k
masked-spacetime-hashing/msth
nerfstudio/models/semantic_nerfw.py
[ { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self) -> int:\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indices.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. 
Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin.\n bin_ends: Distance from origin to end of bin.\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n )\n\n return ray_samples" }, { "identifier": "Semantics", "path": "nerfstudio/data/dataparsers/base_dataparser.py", "snippet": "class Semantics:\n \"\"\"Dataclass for semantic labels.\"\"\"\n\n filenames: List[Path]\n \"\"\"filenames to load semantic data\"\"\"\n classes: List[str]\n \"\"\"class labels for data\"\"\"\n colors: torch.Tensor\n \"\"\"color mapping for classes\"\"\"\n mask_classes: List[str] = field(default_factory=lambda: [])\n \"\"\"classes to mask out from training for all modalities\"\"\"" }, { "identifier": "TrainingCallback", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. 
The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callback (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int) -> None:\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation) -> None:\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)" }, { "identifier": "TrainingCallbackAttributes", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"" }, { "identifier": "TrainingCallbackLocation", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackLocation(Enum):\n \"\"\"Enum for specifying where the training callback should be run.\"\"\"\n\n BEFORE_TRAIN_ITERATION = auto()\n AFTER_TRAIN_ITERATION = auto()" }, { "identifier": "FieldHeadNames", "path": "nerfstudio/field_components/field_heads.py", "snippet": "class FieldHeadNames(Enum):\n \"\"\"Possible field outputs\"\"\"\n\n RGB = \"rgb\"\n SH = \"sh\"\n DENSITY = \"density\"\n NORMALS = \"normals\"\n PRED_NORMALS = \"pred_normals\"\n UNCERTAINTY = \"uncertainty\"\n TRANSIENT_RGB = \"transient_rgb\"\n TRANSIENT_DENSITY = \"transient_density\"\n SEMANTICS = \"semantics\"" }, { "identifier": "SceneContraction", "path": "nerfstudio/field_components/spatial_distortions.py", "snippet": "class SceneContraction(SpatialDistortion):\n \"\"\"Contract unbounded space using the contraction was proposed in MipNeRF-360.\n We use the following contraction equation:\n\n .. 
math::\n\n f(x) = \\\\begin{cases}\n x & ||x|| \\\\leq 1 \\\\\\\\\n (2 - \\\\frac{1}{||x||})(\\\\frac{x}{||x||}) & ||x|| > 1\n \\\\end{cases}\n\n If the order is not specified, we use the Frobenius norm, this will contract the space to a sphere of\n radius 1. If the order is L_inf (order=float(\"inf\")), we will contract the space to a cube of side length 2.\n If using voxel based encodings such as the Hash encoder, we recommend using the L_inf norm.\n\n Args:\n order: Order of the norm. Default to the Frobenius norm. Must be set to None for Gaussians.\n\n \"\"\"\n\n def __init__(self, order: Optional[Union[float, int]] = None) -> None:\n super().__init__()\n self.order = order\n\n def forward(self, positions):\n def contract(x):\n mag = torch.linalg.norm(x, ord=self.order, dim=-1)[..., None]\n return torch.where(mag < 1, x, (2 - (1 / mag)) * (x / mag))\n\n if isinstance(positions, Gaussians):\n means = contract(positions.mean.clone())\n\n contract = lambda x: (2 - (1 / torch.linalg.norm(x, ord=self.order, dim=-1, keepdim=True))) * (\n x / torch.linalg.norm(x, ord=self.order, dim=-1, keepdim=True)\n )\n jc_means = vmap(jacrev(contract))(positions.mean.view(-1, positions.mean.shape[-1]))\n jc_means = jc_means.view(list(positions.mean.shape) + [positions.mean.shape[-1]])\n\n # Only update covariances on positions outside the unit sphere\n mag = positions.mean.norm(dim=-1)\n mask = mag >= 1\n cov = positions.cov.clone()\n cov[mask] = jc_means[mask] @ positions.cov[mask] @ torch.transpose(jc_means[mask], -2, -1)\n\n return Gaussians(mean=means, cov=cov)\n\n return contract(positions)" }, { "identifier": "HashMLPDensityField", "path": "nerfstudio/fields/density_fields.py", "snippet": "class HashMLPDensityField(Field):\n \"\"\"A lightweight density field module.\n\n Args:\n aabb: parameters of scene aabb bounds\n num_layers: number of hidden layers\n hidden_dim: dimension of hidden layers\n spatial_distortion: spatial distortion module\n use_linear: whether to skip the MLP and use a single linear layer instead\n \"\"\"\n\n def __init__(\n self,\n aabb: TensorType,\n num_layers: int = 2,\n hidden_dim: int = 64,\n spatial_distortion: Optional[SpatialDistortion] = None,\n use_linear: bool = False,\n num_levels: int = 8,\n max_res: int = 1024,\n base_res: int = 16,\n log2_hashmap_size: int = 18,\n features_per_level: int = 2,\n ) -> None:\n super().__init__()\n self.register_buffer(\"aabb\", aabb)\n self.spatial_distortion = spatial_distortion\n self.use_linear = use_linear\n growth_factor = np.exp((np.log(max_res) - np.log(base_res)) / (num_levels - 1))\n\n self.register_buffer(\"max_res\", torch.tensor(max_res))\n self.register_buffer(\"num_levels\", torch.tensor(num_levels))\n self.register_buffer(\"log2_hashmap_size\", torch.tensor(log2_hashmap_size))\n\n config = {\n \"encoding\": {\n \"otype\": \"HashGrid\",\n \"n_levels\": num_levels,\n \"n_features_per_level\": features_per_level,\n \"log2_hashmap_size\": log2_hashmap_size,\n \"base_resolution\": base_res,\n \"per_level_scale\": growth_factor,\n },\n \"network\": {\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"None\",\n \"n_neurons\": hidden_dim,\n \"n_hidden_layers\": num_layers - 1,\n },\n }\n\n if not self.use_linear:\n self.mlp_base = tcnn.NetworkWithInputEncoding(\n n_input_dims=3,\n n_output_dims=1,\n encoding_config=config[\"encoding\"],\n network_config=config[\"network\"],\n )\n else:\n self.encoding = tcnn.Encoding(n_input_dims=3, encoding_config=config[\"encoding\"])\n self.linear = 
torch.nn.Linear(self.encoding.n_output_dims, 1)\n\n def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType, None]:\n if self.spatial_distortion is not None:\n positions = self.spatial_distortion(ray_samples.frustums.get_positions())\n positions = (positions + 2.0) / 4.0\n else:\n positions = SceneBox.get_normalized_positions(ray_samples.frustums.get_positions(), self.aabb)\n # Make sure the tcnn gets inputs between 0 and 1.\n selector = ((positions > 0.0) & (positions < 1.0)).all(dim=-1)\n positions = positions * selector[..., None]\n positions_flat = positions.view(-1, 3)\n if not self.use_linear:\n density_before_activation = (\n self.mlp_base(positions_flat).view(*ray_samples.frustums.shape, -1).to(positions)\n )\n else:\n x = self.encoding(positions_flat).to(positions)\n density_before_activation = self.linear(x).view(*ray_samples.frustums.shape, -1)\n\n # Rectifying the density with an exponential is much more stable than a ReLU or\n # softplus, because it enables high post-activation (float32) density outputs\n # from smaller internal (float16) parameters.\n density = trunc_exp(density_before_activation)\n density = density * selector[..., None]\n return density, None\n\n def get_outputs(self, ray_samples: RaySamples, density_embedding: Optional[TensorType] = None) -> dict:\n return {}" }, { "identifier": "TCNNNerfactoField", "path": "nerfstudio/fields/nerfacto_field.py", "snippet": "class TCNNNerfactoField(Field):\n \"\"\"Compound Field that uses TCNN\n\n Args:\n aabb: parameters of scene aabb bounds\n num_images: number of images in the dataset\n num_layers: number of hidden layers\n hidden_dim: dimension of hidden layers\n geo_feat_dim: output geo feat dimensions\n num_levels: number of levels of the hashmap for the base mlp\n max_res: maximum resolution of the hashmap for the base mlp\n log2_hashmap_size: size of the hashmap for the base mlp\n num_layers_color: number of hidden layers for color network\n num_layers_transient: number of hidden layers for transient network\n hidden_dim_color: dimension of hidden layers for color network\n hidden_dim_transient: dimension of hidden layers for transient network\n appearance_embedding_dim: dimension of appearance embedding\n transient_embedding_dim: dimension of transient embedding\n use_transient_embedding: whether to use transient embedding\n use_semantics: whether to use semantic segmentation\n num_semantic_classes: number of semantic classes\n use_pred_normals: whether to use predicted normals\n use_average_appearance_embedding: whether to use average appearance embedding or zeros for inference\n spatial_distortion: spatial distortion to apply to the scene\n \"\"\"\n\n def __init__(\n self,\n aabb: TensorType,\n num_images: int,\n num_layers: int = 2,\n hidden_dim: int = 64,\n geo_feat_dim: int = 15,\n num_levels: int = 16,\n max_res: int = 2048,\n log2_hashmap_size: int = 19,\n num_layers_color: int = 3,\n num_layers_transient: int = 2,\n hidden_dim_color: int = 64,\n hidden_dim_transient: int = 64,\n appearance_embedding_dim: int = 32,\n transient_embedding_dim: int = 16,\n use_transient_embedding: bool = False,\n use_semantics: bool = False,\n num_semantic_classes: int = 100,\n pass_semantic_gradients: bool = False,\n use_pred_normals: bool = False,\n use_average_appearance_embedding: bool = False,\n spatial_distortion: SpatialDistortion = None,\n use_appearance_embedding: bool = True,\n ) -> None:\n super().__init__()\n\n self.register_buffer(\"aabb\", aabb)\n self.geo_feat_dim = geo_feat_dim\n\n 
self.register_buffer(\"max_res\", torch.tensor(max_res))\n self.register_buffer(\"num_levels\", torch.tensor(num_levels))\n self.register_buffer(\"log2_hashmap_size\", torch.tensor(log2_hashmap_size))\n\n self.spatial_distortion = spatial_distortion\n self.num_images = num_images\n self.appearance_embedding_dim = appearance_embedding_dim\n\n self.use_appearance_embedding = use_appearance_embedding\n if self.use_appearance_embedding:\n self.embedding_appearance = Embedding(self.num_images, self.appearance_embedding_dim)\n self.use_average_appearance_embedding = use_average_appearance_embedding\n self.use_transient_embedding = use_transient_embedding\n self.use_semantics = use_semantics\n self.use_pred_normals = use_pred_normals\n self.pass_semantic_gradients = pass_semantic_gradients\n\n base_res: int = 16\n features_per_level: int = 2\n growth_factor = np.exp((np.log(max_res) - np.log(base_res)) / (num_levels - 1))\n\n self.direction_encoding = tcnn.Encoding(\n n_input_dims=3,\n encoding_config={\n \"otype\": \"SphericalHarmonics\",\n \"degree\": 4,\n },\n )\n\n self.position_encoding = tcnn.Encoding(\n n_input_dims=3,\n encoding_config={\"otype\": \"Frequency\", \"n_frequencies\": 2},\n )\n\n self.mlp_base = tcnn.NetworkWithInputEncoding(\n n_input_dims=3,\n n_output_dims=1 + self.geo_feat_dim,\n encoding_config={\n \"otype\": \"HashGrid\",\n \"n_levels\": num_levels,\n \"n_features_per_level\": features_per_level,\n \"log2_hashmap_size\": log2_hashmap_size,\n \"base_resolution\": base_res,\n \"per_level_scale\": growth_factor,\n },\n network_config={\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"None\",\n \"n_neurons\": hidden_dim,\n \"n_hidden_layers\": num_layers - 1,\n },\n )\n\n # transients\n if self.use_transient_embedding:\n self.transient_embedding_dim = transient_embedding_dim\n self.embedding_transient = Embedding(self.num_images, self.transient_embedding_dim)\n self.mlp_transient = tcnn.Network(\n n_input_dims=self.geo_feat_dim + self.transient_embedding_dim,\n n_output_dims=hidden_dim_transient,\n network_config={\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"None\",\n \"n_neurons\": hidden_dim_transient,\n \"n_hidden_layers\": num_layers_transient - 1,\n },\n )\n self.field_head_transient_uncertainty = UncertaintyFieldHead(in_dim=self.mlp_transient.n_output_dims)\n self.field_head_transient_rgb = TransientRGBFieldHead(in_dim=self.mlp_transient.n_output_dims)\n self.field_head_transient_density = TransientDensityFieldHead(in_dim=self.mlp_transient.n_output_dims)\n\n # semantics\n if self.use_semantics:\n self.mlp_semantics = tcnn.Network(\n n_input_dims=self.geo_feat_dim,\n n_output_dims=hidden_dim_transient,\n network_config={\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"None\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n },\n )\n self.field_head_semantics = SemanticFieldHead(\n in_dim=self.mlp_semantics.n_output_dims, num_classes=num_semantic_classes\n )\n\n # predicted normals\n if self.use_pred_normals:\n self.mlp_pred_normals = tcnn.Network(\n n_input_dims=self.geo_feat_dim + self.position_encoding.n_output_dims,\n n_output_dims=hidden_dim_transient,\n network_config={\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"None\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 2,\n },\n )\n self.field_head_pred_normals = PredNormalsFieldHead(in_dim=self.mlp_pred_normals.n_output_dims)\n\n self.mlp_head = 
tcnn.Network(\n n_input_dims=self.direction_encoding.n_output_dims + self.geo_feat_dim + (self.appearance_embedding_dim) * int(self.use_appearance_embedding),\n n_output_dims=3,\n network_config={\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"Sigmoid\",\n \"n_neurons\": hidden_dim_color,\n \"n_hidden_layers\": num_layers_color - 1,\n },\n )\n\n def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType, TensorType]:\n \"\"\"Computes and returns the densities.\"\"\"\n if self.spatial_distortion is not None:\n positions = ray_samples.frustums.get_positions()\n positions = self.spatial_distortion(positions)\n positions = (positions + 2.0) / 4.0\n else:\n positions = SceneBox.get_normalized_positions(ray_samples.frustums.get_positions(), self.aabb)\n # Make sure the tcnn gets inputs between 0 and 1.\n selector = ((positions > 0.0) & (positions < 1.0)).all(dim=-1)\n positions = positions * selector[..., None]\n self._sample_locations = positions\n if not self._sample_locations.requires_grad:\n self._sample_locations.requires_grad = True\n positions_flat = positions.view(-1, 3)\n h = self.mlp_base(positions_flat).view(*ray_samples.frustums.shape, -1)\n density_before_activation, base_mlp_out = torch.split(h, [1, self.geo_feat_dim], dim=-1)\n self._density_before_activation = density_before_activation\n\n # Rectifying the density with an exponential is much more stable than a ReLU or\n # softplus, because it enables high post-activation (float32) density outputs\n # from smaller internal (float16) parameters.\n density = trunc_exp(density_before_activation.to(positions))\n density = density * selector[..., None]\n return density, base_mlp_out\n\n def get_outputs(\n self, ray_samples: RaySamples, density_embedding: Optional[TensorType] = None\n ) -> Dict[FieldHeadNames, TensorType]:\n assert density_embedding is not None\n outputs = {}\n if ray_samples.camera_indices is None:\n raise AttributeError(\"Camera indices are not provided.\")\n camera_indices = ray_samples.camera_indices.squeeze()\n directions = get_normalized_directions(ray_samples.frustums.directions)\n directions_flat = directions.view(-1, 3)\n d = self.direction_encoding(directions_flat)\n\n outputs_shape = ray_samples.frustums.directions.shape[:-1]\n\n # appearance\n if self.use_appearance_embedding:\n if self.training:\n embedded_appearance = self.embedding_appearance(camera_indices)\n else:\n if self.use_average_appearance_embedding:\n embedded_appearance = torch.ones(\n (*directions.shape[:-1], self.appearance_embedding_dim), device=directions.device\n ) * self.embedding_appearance.mean(dim=0)\n else:\n embedded_appearance = torch.zeros(\n (*directions.shape[:-1], self.appearance_embedding_dim), device=directions.device\n )\n\n # transients\n if self.use_transient_embedding and self.training:\n embedded_transient = self.embedding_transient(camera_indices)\n transient_input = torch.cat(\n [\n density_embedding.view(-1, self.geo_feat_dim),\n embedded_transient.view(-1, self.transient_embedding_dim),\n ],\n dim=-1,\n )\n x = self.mlp_transient(transient_input).view(*outputs_shape, -1).to(directions)\n outputs[FieldHeadNames.UNCERTAINTY] = self.field_head_transient_uncertainty(x)\n outputs[FieldHeadNames.TRANSIENT_RGB] = self.field_head_transient_rgb(x)\n outputs[FieldHeadNames.TRANSIENT_DENSITY] = self.field_head_transient_density(x)\n\n # semantics\n if self.use_semantics:\n semantics_input = density_embedding.view(-1, self.geo_feat_dim)\n if not self.pass_semantic_gradients:\n 
semantics_input = semantics_input.detach()\n\n x = self.mlp_semantics(semantics_input).view(*outputs_shape, -1).to(directions)\n outputs[FieldHeadNames.SEMANTICS] = self.field_head_semantics(x)\n\n # predicted normals\n if self.use_pred_normals:\n positions = ray_samples.frustums.get_positions()\n\n positions_flat = self.position_encoding(positions.view(-1, 3))\n pred_normals_inp = torch.cat([positions_flat, density_embedding.view(-1, self.geo_feat_dim)], dim=-1)\n\n x = self.mlp_pred_normals(pred_normals_inp).view(*outputs_shape, -1).to(directions)\n outputs[FieldHeadNames.PRED_NORMALS] = self.field_head_pred_normals(x)\n\n if self.use_appearance_embedding:\n h = torch.cat(\n [\n d,\n density_embedding.view(-1, self.geo_feat_dim),\n embedded_appearance.view(-1, self.appearance_embedding_dim),\n ],\n dim=-1,\n )\n else:\n h = torch.cat(\n [\n d,\n density_embedding.view(-1, self.geo_feat_dim),\n ],\n dim=-1,\n )\n\n rgb = self.mlp_head(h).view(*outputs_shape, -1).to(directions)\n outputs.update({FieldHeadNames.RGB: rgb})\n\n return outputs" }, { "identifier": "MSELoss", "path": "nerfstudio/model_components/losses.py", "snippet": "LOSSES = {\"L1\": L1Loss, \"MSE\": MSELoss}\nEPS = 1.0e-7\nURF_SIGMA_SCALE_FACTOR = 3.0\n DS_NERF = 1\n URF = 2\nclass DepthLossType(Enum):\nclass MiDaSMSELoss(nn.Module):\nclass GradientLoss(nn.Module):\nclass ScaleAndShiftInvariantLoss(nn.Module):\ndef outer(\n t0_starts: TensorType[..., \"num_samples_0\"],\n t0_ends: TensorType[..., \"num_samples_0\"],\n t1_starts: TensorType[..., \"num_samples_1\"],\n t1_ends: TensorType[..., \"num_samples_1\"],\n y1: TensorType[..., \"num_samples_1\"],\n) -> TensorType[..., \"num_samples_0\"]:\ndef lossfun_outer(\n t: TensorType[..., \"num_samples+1\"],\n w: TensorType[..., \"num_samples\"],\n t_env: TensorType[..., \"num_samples+1\"],\n w_env: TensorType[..., \"num_samples\"],\n):\ndef ray_samples_to_sdist(ray_samples):\ndef interlevel_loss(weights_list, ray_samples_list):\ndef lossfun_distortion(t, w):\ndef distortion_loss(weights_list, ray_samples_list):\ndef nerfstudio_distortion_loss(\n ray_samples: RaySamples,\n densities: TensorType[\"bs\":..., \"num_samples\", 1] = None,\n weights: TensorType[\"bs\":..., \"num_samples\", 1] = None,\n) -> TensorType[\"bs\":..., 1]:\ndef orientation_loss(\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n normals: TensorType[\"bs\":..., \"num_samples\", 3],\n viewdirs: TensorType[\"bs\":..., 3],\n):\ndef pred_normal_loss(\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n normals: TensorType[\"bs\":..., \"num_samples\", 3],\n pred_normals: TensorType[\"bs\":..., \"num_samples\", 3],\n):\ndef ds_nerf_depth_loss(\n weights: TensorType[..., \"num_samples\", 1],\n termination_depth: TensorType[..., 1],\n steps: TensorType[..., \"num_samples\", 1],\n lengths: TensorType[..., \"num_samples\", 1],\n sigma: TensorType[0],\n) -> TensorType[..., 1]:\ndef urban_radiance_field_depth_loss(\n weights: TensorType[..., \"num_samples\", 1],\n termination_depth: TensorType[..., 1],\n predicted_depth: TensorType[..., 1],\n steps: TensorType[..., \"num_samples\", 1],\n sigma: TensorType[0],\n) -> TensorType[..., 1]:\ndef depth_loss(\n weights: TensorType[..., \"num_samples\", 1],\n ray_samples: RaySamples,\n termination_depth: TensorType[..., 1],\n predicted_depth: TensorType[..., 1],\n sigma: TensorType[0],\n directions_norm: TensorType[..., 1],\n is_euclidean: bool,\n depth_loss_type: DepthLossType,\n) -> TensorType[0]:\ndef monosdf_normal_loss(\n normal_pred: TensorType[\"num_samples\", 3], 
normal_gt: TensorType[\"num_samples\", 3]\n) -> TensorType[0]:\n def __init__(self, reduction_type: Literal[\"image\", \"batch\"] = \"batch\"):\n def forward(\n self, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ) -> TensorType[0]:\n def __init__(self, scales: int = 4, reduction_type: Literal[\"image\", \"batch\"] = \"batch\"):\n def forward(\n self, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ) -> TensorType[0]:\n def gradient_loss(\n self, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ) -> TensorType[0]:\n def __init__(self, alpha: float = 0.5, scales: int = 4, reduction_type: Literal[\"image\", \"batch\"] = \"batch\"):\n def compute_scale_and_shift(\n cls, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ):\n def forward(\n self, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ) -> TensorType[0]:\n def __get_prediction_ssi(self):" }, { "identifier": "ProposalNetworkSampler", "path": "nerfstudio/model_components/ray_samplers.py", "snippet": "class ProposalNetworkSampler(Sampler):\n \"\"\"Sampler that uses a proposal network to generate samples.\n\n Args:\n num_proposal_samples_per_ray: Number of samples to generate per ray for each proposal step.\n num_nerf_samples_per_ray: Number of samples to generate per ray for the NERF model.\n num_proposal_network_iterations: Number of proposal network iterations to run.\n single_jitter: Use a same random jitter for all samples along a ray.\n update_sched: A function that takes the iteration number of steps between updates.\n initial_sampler: Sampler to use for the first iteration. Uses UniformLinDispPiecewise if not set.\n \"\"\"\n\n def __init__(\n self,\n num_proposal_samples_per_ray: Tuple[int] = (64,),\n num_nerf_samples_per_ray: int = 32,\n num_proposal_network_iterations: int = 2,\n single_jitter: bool = False,\n update_sched: Callable = lambda x: 1,\n initial_sampler: Optional[Sampler] = None,\n ) -> None:\n super().__init__()\n self.num_proposal_samples_per_ray = num_proposal_samples_per_ray\n self.num_nerf_samples_per_ray = num_nerf_samples_per_ray\n self.num_proposal_network_iterations = num_proposal_network_iterations\n self.update_sched = update_sched\n if self.num_proposal_network_iterations < 1:\n raise ValueError(\"num_proposal_network_iterations must be >= 1\")\n\n # samplers\n if initial_sampler is None:\n self.initial_sampler = UniformLinDispPiecewiseSampler(single_jitter=single_jitter)\n else:\n self.initial_sampler = initial_sampler\n self.pdf_sampler = PDFSampler(include_original=False, single_jitter=single_jitter)\n\n self._anneal = 1.0\n self._steps_since_update = 0\n self._step = 0\n\n def set_anneal(self, anneal: float) -> None:\n \"\"\"Set the anneal value for the proposal network.\"\"\"\n self._anneal = anneal\n\n def step_cb(self, step):\n \"\"\"Callback to register a training step has passed. 
This is used to keep track of the sampling schedule\"\"\"\n self._step = step\n self._steps_since_update += 1\n\n def generate_ray_samples(\n self,\n ray_bundle: Optional[RayBundle] = None,\n density_fns: Optional[List[Callable]] = None,\n ) -> Tuple[RaySamples, List, List]:\n assert ray_bundle is not None\n assert density_fns is not None\n\n weights_list = []\n ray_samples_list = []\n\n n = self.num_proposal_network_iterations\n weights = None\n ray_samples = None\n updated = self._steps_since_update > self.update_sched(self._step) or self._step < 10\n for i_level in range(n + 1):\n is_prop = i_level < n\n num_samples = self.num_proposal_samples_per_ray[i_level] if is_prop else self.num_nerf_samples_per_ray\n if i_level == 0:\n # Uniform sampling because we need to start with some samples\n ray_samples = self.initial_sampler(ray_bundle, num_samples=num_samples)\n else:\n # PDF sampling based on the last samples and their weights\n # Perform annealing to the weights. This will be a no-op if self._anneal is 1.0.\n assert weights is not None\n annealed_weights = torch.pow(weights, self._anneal)\n ray_samples = self.pdf_sampler(ray_bundle, ray_samples, annealed_weights, num_samples=num_samples)\n if is_prop:\n if updated:\n # always update on the first step or the inf check in grad scaling crashes\n density = density_fns[i_level](ray_samples.frustums.get_positions())\n else:\n with torch.no_grad():\n density = density_fns[i_level](ray_samples.frustums.get_positions())\n weights = ray_samples.get_weights(density)\n weights_list.append(weights) # (num_rays, num_samples)\n ray_samples_list.append(ray_samples)\n if updated:\n self._steps_since_update = 0\n\n assert ray_samples is not None\n return ray_samples, weights_list, ray_samples_list" }, { "identifier": "AccumulationRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class AccumulationRenderer(nn.Module):\n \"\"\"Accumulated value along a ray.\"\"\"\n\n @classmethod\n def forward(\n cls,\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[\"bs\":..., 1]:\n \"\"\"Composite samples along ray and calculate accumulation.\n\n Args:\n weights: Weights for each sample\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of accumulated values.\n \"\"\"\n\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n accumulation = nerfacc.accumulate_along_rays(weights, ray_indices, None, num_rays)\n else:\n accumulation = torch.sum(weights, dim=-2)\n return accumulation" }, { "identifier": "DepthRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class DepthRenderer(nn.Module):\n \"\"\"Calculate depth along ray.\n\n Depth Method:\n - median: Depth is set to the distance where the accumulated weight reaches 0.5.\n - expected: Expected depth along ray. 
Same procedure as rendering rgb, but with depth.\n\n Args:\n method: Depth calculation method.\n \"\"\"\n\n def __init__(self, method: Literal[\"median\", \"expected\"] = \"median\") -> None:\n super().__init__()\n self.method = method\n\n def forward(\n self,\n weights: TensorType[..., \"num_samples\", 1],\n ray_samples: RaySamples,\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[..., 1]:\n \"\"\"Composite samples along ray and calculate depths.\n\n Args:\n weights: Weights for each sample.\n ray_samples: Set of ray samples.\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of depth values.\n \"\"\"\n\n if self.method == \"median\":\n steps = (ray_samples.frustums.starts + ray_samples.frustums.ends) / 2\n\n if ray_indices is not None and num_rays is not None:\n raise NotImplementedError(\"Median depth calculation is not implemented for packed samples.\")\n cumulative_weights = torch.cumsum(weights[..., 0], dim=-1) # [..., num_samples]\n split = torch.ones((*weights.shape[:-2], 1), device=weights.device) * 0.5 # [..., 1]\n median_index = torch.searchsorted(cumulative_weights, split, side=\"left\") # [..., 1]\n median_index = torch.clamp(median_index, 0, steps.shape[-2] - 1) # [..., 1]\n median_depth = torch.gather(steps[..., 0], dim=-1, index=median_index) # [..., 1]\n return median_depth\n if self.method == \"expected\":\n eps = 1e-10\n steps = (ray_samples.frustums.starts + ray_samples.frustums.ends) / 2\n\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n depth = nerfacc.accumulate_along_rays(weights, ray_indices, steps, num_rays)\n accumulation = nerfacc.accumulate_along_rays(weights, ray_indices, None, num_rays)\n depth = depth / (accumulation + eps)\n else:\n depth = torch.sum(weights * steps, dim=-2) / (torch.sum(weights, -2) + eps)\n\n depth = torch.clip(depth, steps.min(), steps.max())\n\n return depth\n\n raise NotImplementedError(f\"Method {self.method} not implemented\")" }, { "identifier": "RGBRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class RGBRenderer(nn.Module):\n \"\"\"Standard volumetric rendering.\n\n Args:\n background_color: Background color as RGB. 
Uses random colors if None.\n \"\"\"\n\n def __init__(self, background_color: Union[Literal[\"random\", \"last_sample\"], TensorType[3]] = \"random\") -> None:\n super().__init__()\n self.background_color = background_color\n\n @classmethod\n def combine_rgb(\n cls,\n rgb: TensorType[\"bs\":..., \"num_samples\", 3],\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n background_color: Union[Literal[\"random\", \"white\", \"black\", \"last_sample\"], TensorType[3]] = \"random\",\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[\"bs\":..., 3]:\n \"\"\"Composite samples along ray and render color image\n\n Args:\n rgb: RGB for each sample\n weights: Weights for each sample\n background_color: Background color as RGB.\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs rgb values.\n \"\"\"\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n if background_color == \"last_sample\":\n raise NotImplementedError(\"Background color 'last_sample' not implemented for packed samples.\")\n comp_rgb = nerfacc.accumulate_along_rays(weights, ray_indices, rgb, num_rays)\n accumulated_weight = nerfacc.accumulate_along_rays(weights, ray_indices, None, num_rays)\n else:\n comp_rgb = torch.sum(weights * rgb, dim=-2)\n accumulated_weight = torch.sum(weights, dim=-2)\n\n if BACKGROUND_COLOR_OVERRIDE is not None:\n background_color = BACKGROUND_COLOR_OVERRIDE\n if background_color == \"last_sample\":\n background_color = rgb[..., -1, :]\n if background_color == \"random\":\n background_color = torch.rand_like(comp_rgb).to(rgb.device)\n if isinstance(background_color, str) and background_color in colors.COLORS_DICT:\n background_color = colors.COLORS_DICT[background_color].to(rgb.device)\n\n assert isinstance(background_color, torch.Tensor)\n comp_rgb = comp_rgb + background_color.to(weights.device) * (1.0 - accumulated_weight)\n\n return comp_rgb\n\n def forward(\n self,\n rgb: TensorType[\"bs\":..., \"num_samples\", 3],\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[\"bs\":..., 3]:\n \"\"\"Composite samples along ray and render color image\n\n Args:\n rgb: RGB for each sample\n weights: Weights for each sample\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of rgb values.\n \"\"\"\n\n if not self.training:\n rgb = torch.nan_to_num(rgb)\n rgb = self.combine_rgb(\n rgb, weights, background_color=self.background_color, ray_indices=ray_indices, num_rays=num_rays\n )\n if not self.training:\n torch.clamp_(rgb, min=0.0, max=1.0)\n return rgb" }, { "identifier": "SemanticRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class SemanticRenderer(nn.Module):\n \"\"\"Calculate semantics along the ray.\"\"\"\n\n @classmethod\n def forward(\n cls,\n semantics: TensorType[\"bs\":..., \"num_samples\", \"num_classes\"],\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n ) -> TensorType[\"bs\":..., \"num_classes\"]:\n \"\"\"Calculate semantics along the ray.\"\"\"\n sem = torch.sum(weights * semantics, dim=-2)\n return sem" }, { "identifier": "UncertaintyRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class 
UncertaintyRenderer(nn.Module):\n \"\"\"Calculate uncertainty along the ray.\"\"\"\n\n @classmethod\n def forward(\n cls, betas: TensorType[\"bs\":..., \"num_samples\", 1], weights: TensorType[\"bs\":..., \"num_samples\", 1]\n ) -> TensorType[\"bs\":..., 1]:\n \"\"\"Calculate uncertainty along the ray.\n\n Args:\n betas: Uncertainty betas for each sample.\n weights: Weights of each sample.\n\n Returns:\n Rendering of uncertainty.\n \"\"\"\n uncertainty = torch.sum(weights * betas, dim=-2)\n return uncertainty" }, { "identifier": "NearFarCollider", "path": "nerfstudio/model_components/scene_colliders.py", "snippet": "class NearFarCollider(SceneCollider):\n \"\"\"Sets the nears and fars with fixed values.\n\n Args:\n near_plane: distance to near plane\n far_plane: distance to far plane\n \"\"\"\n\n def __init__(self, near_plane: float, far_plane: float, **kwargs) -> None:\n self.near_plane = near_plane\n self.far_plane = far_plane\n super().__init__(**kwargs)\n\n def set_nears_and_fars(self, ray_bundle: RayBundle) -> RayBundle:\n ones = torch.ones_like(ray_bundle.origins[..., 0:1])\n near_plane = self.near_plane if self.training else 0\n ray_bundle.nears = ones * near_plane\n ray_bundle.fars = ones * self.far_plane\n return ray_bundle" }, { "identifier": "Model", "path": "nerfstudio/models/base_model.py", "snippet": "class Model(nn.Module):\n \"\"\"Model class\n Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. This should be\n subclassed for custom NeRF model.\n\n Args:\n config: configuration for instantiating model\n scene_box: dataset scene box\n \"\"\"\n\n config: ModelConfig\n\n def __init__(\n self,\n config: ModelConfig,\n scene_box: SceneBox,\n num_train_data: int,\n **kwargs,\n ) -> None:\n super().__init__()\n self.config = config\n self.scene_box = scene_box\n self.render_aabb = None # the box that we want to render - should be a subset of scene_box\n self.num_train_data = num_train_data\n self.kwargs = kwargs\n self.collider = None\n\n self.populate_modules() # populate the modules\n self.callbacks = None\n # to keep track of which device the nn.Module is on\n self.device_indicator_param = nn.Parameter(torch.empty(0))\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.device_indicator_param.device\n\n def get_training_callbacks( # pylint:disable=no-self-use\n self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument\n ) -> List[TrainingCallback]:\n \"\"\"Returns a list of callbacks that run functions at the specified training iterations.\"\"\"\n return []\n\n def populate_modules(self):\n \"\"\"Set the necessary modules to get the network working.\"\"\"\n # default instantiates optional modules that are common among many networks\n # NOTE: call `super().populate_modules()` in subclasses\n\n if self.config.enable_collider:\n self.collider = NearFarCollider(\n near_plane=self.config.collider_params[\"near_plane\"], far_plane=self.config.collider_params[\"far_plane\"]\n )\n\n @abstractmethod\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Obtain the parameter groups for the optimizers\n\n Returns:\n Mapping of different parameter groups\n \"\"\"\n\n @abstractmethod\n def get_outputs(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in a Ray Bundle and returns a dictionary of outputs.\n\n Args:\n ray_bundle: Input bundle of rays. 
This raybundle should have all the\n needed information to compute the outputs.\n\n Returns:\n Outputs of model. (ie. rendered colors)\n \"\"\"\n\n def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Run forward starting with a ray bundle. This outputs different things depending on the configuration\n of the model and whether or not the batch is provided (whether or not we are training basically)\n\n Args:\n ray_bundle: containing all the information needed to render that ray latents included\n \"\"\"\n\n if self.collider is not None:\n ray_bundle = self.collider(ray_bundle)\n\n return self.get_outputs(ray_bundle)\n\n def get_metrics_dict(self, outputs, batch) -> Dict[str, torch.Tensor]:\n \"\"\"Compute and returns metrics.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n \"\"\"\n # pylint: disable=unused-argument\n # pylint: disable=no-self-use\n return {}\n\n @abstractmethod\n def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str, torch.Tensor]:\n \"\"\"Computes and returns the losses dict.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n metrics_dict: dictionary of metrics, some of which we can use for loss\n \"\"\"\n\n @torch.no_grad()\n def get_outputs_for_camera_ray_bundle(self, camera_ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in camera parameters and computes the output of the model.\n\n Args:\n camera_ray_bundle: ray bundle to calculate outputs over\n \"\"\"\n num_rays_per_chunk = self.config.eval_num_rays_per_chunk\n image_height, image_width = camera_ray_bundle.origins.shape[:2]\n num_rays = len(camera_ray_bundle)\n outputs_lists = defaultdict(list)\n with Timer(\"forwarding\"):\n _t1 = time.time()\n for i in range(0, num_rays, num_rays_per_chunk):\n start_idx = i\n end_idx = i + num_rays_per_chunk\n ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx)\n outputs = self.forward(ray_bundle=ray_bundle)\n for output_name, output in outputs.items(): # type: ignore\n outputs_lists[output_name].append(output)\n print(f\"forwarding took {time.time() - _t1} seconds\")\n outputs = {}\n for output_name, outputs_list in outputs_lists.items():\n if not torch.is_tensor(outputs_list[0]):\n # TODO: handle lists of tensors as well\n continue\n if output_name == \"mask_val\":\n outputs[\"mask_val\"] = torch.cat(outputs_list, dim=0)\n outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1) # type: ignore\n return outputs\n\n @abstractmethod\n def get_image_metrics_and_images(\n self, outputs: Dict[str, torch.Tensor], batch: Dict[str, torch.Tensor]\n ) -> Tuple[Dict[str, float], Dict[str, torch.Tensor]]:\n \"\"\"Writes the test image outputs.\n TODO: This shouldn't return a loss\n\n Args:\n image_idx: Index of the image.\n step: Current step.\n batch: Batch of data.\n outputs: Outputs of the model.\n\n Returns:\n A dictionary of metrics.\n \"\"\"\n\n def load_model(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: dictionary of pre-trained model states\n \"\"\"\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state[\"model\"].items()}\n self.load_state_dict(state) # type: ignore\n\n def update_to_step(self, step: int) -> None:\n \"\"\"Called when loading a model from a checkpoint. 
Sets any model parameters that change over\n training to the correct value, based on the training step of the checkpoint.\n\n Args:\n step: training step of the loaded checkpoint\n \"\"\"" }, { "identifier": "NerfactoModelConfig", "path": "nerfstudio/models/nerfacto.py", "snippet": "class NerfactoModelConfig(ModelConfig):\n \"\"\"Nerfacto Model Config\"\"\"\n\n _target: Type = field(default_factory=lambda: NerfactoModel)\n near_plane: float = 0.05\n \"\"\"How far along the ray to start sampling.\"\"\"\n far_plane: float = 1000.0\n \"\"\"How far along the ray to stop sampling.\"\"\"\n background_color: Literal[\"random\", \"last_sample\", \"black\", \"white\"] = \"last_sample\"\n \"\"\"Whether to randomize the background color.\"\"\"\n hidden_dim: int = 64\n \"\"\"Dimension of hidden layers\"\"\"\n hidden_dim_color: int = 64\n \"\"\"Dimension of hidden layers for color network\"\"\"\n hidden_dim_transient: int = 64\n \"\"\"Dimension of hidden layers for transient network\"\"\"\n num_levels: int = 16\n \"\"\"Number of levels of the hashmap for the base mlp.\"\"\"\n max_res: int = 2048\n \"\"\"Maximum resolution of the hashmap for the base mlp.\"\"\"\n log2_hashmap_size: int = 19\n \"\"\"Size of the hashmap for the base mlp\"\"\"\n num_proposal_samples_per_ray: Tuple[int, ...] = (256, 96)\n \"\"\"Number of samples per ray for each proposal network.\"\"\"\n num_nerf_samples_per_ray: int = 48\n \"\"\"Number of samples per ray for the nerf network.\"\"\"\n proposal_update_every: int = 5\n \"\"\"Sample every n steps after the warmup\"\"\"\n proposal_warmup: int = 5000\n \"\"\"Scales n from 1 to proposal_update_every over this many steps\"\"\"\n num_proposal_iterations: int = 2\n \"\"\"Number of proposal network iterations.\"\"\"\n use_same_proposal_network: bool = False\n \"\"\"Use the same proposal network. Otherwise use different ones.\"\"\"\n proposal_net_args_list: List[Dict] = field(\n default_factory=lambda: [\n {\"hidden_dim\": 16, \"log2_hashmap_size\": 17, \"num_levels\": 5, \"max_res\": 128, \"use_linear\": False},\n {\"hidden_dim\": 16, \"log2_hashmap_size\": 17, \"num_levels\": 5, \"max_res\": 256, \"use_linear\": False},\n ]\n )\n \"\"\"Arguments for the proposal density fields.\"\"\"\n proposal_initial_sampler: Literal[\"piecewise\", \"uniform\"] = \"piecewise\"\n \"\"\"Initial sampler for the proposal network. 
Piecewise is preferred for unbounded scenes.\"\"\"\n interlevel_loss_mult: float = 1.0\n \"\"\"Proposal loss multiplier.\"\"\"\n distortion_loss_mult: float = 0.002\n \"\"\"Distortion loss multiplier.\"\"\"\n orientation_loss_mult: float = 0.0001\n \"\"\"Orientation loss multiplier on computed normals.\"\"\"\n pred_normal_loss_mult: float = 0.001\n \"\"\"Predicted normal loss multiplier.\"\"\"\n use_proposal_weight_anneal: bool = True\n \"\"\"Whether to use proposal weight annealing.\"\"\"\n use_average_appearance_embedding: bool = True\n \"\"\"Whether to use average appearance embedding or zeros for inference.\"\"\"\n proposal_weights_anneal_slope: float = 10.0\n \"\"\"Slope of the annealing function for the proposal weights.\"\"\"\n proposal_weights_anneal_max_num_iters: int = 1000\n \"\"\"Max num iterations for the annealing function.\"\"\"\n use_single_jitter: bool = True\n \"\"\"Whether use single jitter or not for the proposal networks.\"\"\"\n predict_normals: bool = False\n \"\"\"Whether to predict normals or not.\"\"\"\n disable_scene_contraction: bool = False\n \"\"\"Whether to disable scene contraction or not.\"\"\"\n\n \"\"\" feng add \"\"\"\n use_appearance_embedding: bool = True\n \"\"\" /feng add\"\"\"" }, { "identifier": "colormaps", "path": "nerfstudio/utils/colormaps.py", "snippet": "def apply_colormap(image: TensorType[\"bs\":..., 1], cmap=\"viridis\") -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_depth_colormap(\n depth: TensorType[\"bs\":..., 1],\n accumulation: Optional[TensorType[\"bs\":..., 1]] = None,\n near_plane: Optional[float] = None,\n far_plane: Optional[float] = None,\n cmap=\"turbo\",\n) -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_boolean_colormap(\n image: TensorType[\"bs\":..., 1, bool],\n true_color: TensorType[\"bs\":..., \"rgb\":3] = colors.WHITE,\n false_color: TensorType[\"bs\":..., \"rgb\":3] = colors.BLACK,\n) -> TensorType[\"bs\":..., \"rgb\":3]:" } ]
from dataclasses import dataclass, field
from typing import Dict, List, Tuple, Type
from torch.nn import Parameter
from torchmetrics import PeakSignalNoiseRatio
from torchmetrics.functional import structural_similarity_index_measure
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.data.dataparsers.base_dataparser import Semantics
from nerfstudio.engine.callbacks import (
    TrainingCallback,
    TrainingCallbackAttributes,
    TrainingCallbackLocation,
)
from nerfstudio.field_components.field_heads import FieldHeadNames
from nerfstudio.field_components.spatial_distortions import SceneContraction
from nerfstudio.fields.density_fields import HashMLPDensityField
from nerfstudio.fields.nerfacto_field import TCNNNerfactoField
from nerfstudio.model_components.losses import MSELoss, distortion_loss, interlevel_loss
from nerfstudio.model_components.ray_samplers import ProposalNetworkSampler
from nerfstudio.model_components.renderers import (
    AccumulationRenderer,
    DepthRenderer,
    RGBRenderer,
    SemanticRenderer,
    UncertaintyRenderer,
)
from nerfstudio.model_components.scene_colliders import NearFarCollider
from nerfstudio.models.base_model import Model
from nerfstudio.models.nerfacto import NerfactoModelConfig
from nerfstudio.utils import colormaps
import numpy as np
import torch
14,794
# Copyright 2022 The nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Semantic NeRF-W implementation which should be fast enough to view in the viewer. """ from __future__ import annotations @dataclass class SemanticNerfWModelConfig(NerfactoModelConfig): """Nerfacto Model Config""" _target: Type = field(default_factory=lambda: SemanticNerfWModel) use_transient_embedding: bool = False """Whether to use transient embedding.""" semantic_loss_weight: float = 1.0 pass_semantic_gradients: bool = False class SemanticNerfWModel(Model): """Nerfacto model Args: config: Nerfacto configuration to instantiate model """ config: SemanticNerfWModelConfig def __init__(self, config: SemanticNerfWModelConfig, metadata: Dict, **kwargs) -> None: assert "semantics" in metadata.keys() and isinstance(metadata["semantics"], Semantics) self.semantics = metadata["semantics"] super().__init__(config=config, **kwargs) self.colormap = self.semantics.colors.clone().detach().to(self.device) def populate_modules(self): """Set the fields and modules.""" super().populate_modules() scene_contraction = SceneContraction(order=float("inf")) if self.config.use_transient_embedding: raise ValueError("Transient embedding is not fully working for semantic nerf-w.") # Fields self.field = TCNNNerfactoField( self.scene_box.aabb, num_levels=self.config.num_levels, max_res=self.config.max_res, log2_hashmap_size=self.config.log2_hashmap_size, spatial_distortion=scene_contraction, num_images=self.num_train_data, use_average_appearance_embedding=self.config.use_average_appearance_embedding, use_transient_embedding=self.config.use_transient_embedding, use_semantics=True, num_semantic_classes=len(self.semantics.classes), pass_semantic_gradients=self.config.pass_semantic_gradients, ) # Build the proposal network(s) self.proposal_networks = torch.nn.ModuleList() if self.config.use_same_proposal_network: network = HashMLPDensityField(self.scene_box.aabb, spatial_distortion=scene_contraction) self.proposal_networks.append(network) self.density_fns = [network.density_fn for _ in range(self.config.num_proposal_iterations)] else: for _ in range(self.config.num_proposal_iterations): network = HashMLPDensityField(self.scene_box.aabb, spatial_distortion=scene_contraction) self.proposal_networks.append(network) self.density_fns = [network.density_fn for network in self.proposal_networks] # Collider self.collider = NearFarCollider(near_plane=self.config.near_plane, far_plane=self.config.far_plane) # Samplers self.proposal_sampler = ProposalNetworkSampler( num_nerf_samples_per_ray=self.config.num_nerf_samples_per_ray, num_proposal_samples_per_ray=self.config.num_proposal_samples_per_ray, num_proposal_network_iterations=self.config.num_proposal_iterations, single_jitter=self.config.use_single_jitter, ) # renderers self.renderer_rgb = RGBRenderer(background_color=self.config.background_color) self.renderer_accumulation = AccumulationRenderer()
# Copyright 2022 The nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Semantic NeRF-W implementation which should be fast enough to view in the viewer. """ from __future__ import annotations @dataclass class SemanticNerfWModelConfig(NerfactoModelConfig): """Nerfacto Model Config""" _target: Type = field(default_factory=lambda: SemanticNerfWModel) use_transient_embedding: bool = False """Whether to use transient embedding.""" semantic_loss_weight: float = 1.0 pass_semantic_gradients: bool = False class SemanticNerfWModel(Model): """Nerfacto model Args: config: Nerfacto configuration to instantiate model """ config: SemanticNerfWModelConfig def __init__(self, config: SemanticNerfWModelConfig, metadata: Dict, **kwargs) -> None: assert "semantics" in metadata.keys() and isinstance(metadata["semantics"], Semantics) self.semantics = metadata["semantics"] super().__init__(config=config, **kwargs) self.colormap = self.semantics.colors.clone().detach().to(self.device) def populate_modules(self): """Set the fields and modules.""" super().populate_modules() scene_contraction = SceneContraction(order=float("inf")) if self.config.use_transient_embedding: raise ValueError("Transient embedding is not fully working for semantic nerf-w.") # Fields self.field = TCNNNerfactoField( self.scene_box.aabb, num_levels=self.config.num_levels, max_res=self.config.max_res, log2_hashmap_size=self.config.log2_hashmap_size, spatial_distortion=scene_contraction, num_images=self.num_train_data, use_average_appearance_embedding=self.config.use_average_appearance_embedding, use_transient_embedding=self.config.use_transient_embedding, use_semantics=True, num_semantic_classes=len(self.semantics.classes), pass_semantic_gradients=self.config.pass_semantic_gradients, ) # Build the proposal network(s) self.proposal_networks = torch.nn.ModuleList() if self.config.use_same_proposal_network: network = HashMLPDensityField(self.scene_box.aabb, spatial_distortion=scene_contraction) self.proposal_networks.append(network) self.density_fns = [network.density_fn for _ in range(self.config.num_proposal_iterations)] else: for _ in range(self.config.num_proposal_iterations): network = HashMLPDensityField(self.scene_box.aabb, spatial_distortion=scene_contraction) self.proposal_networks.append(network) self.density_fns = [network.density_fn for network in self.proposal_networks] # Collider self.collider = NearFarCollider(near_plane=self.config.near_plane, far_plane=self.config.far_plane) # Samplers self.proposal_sampler = ProposalNetworkSampler( num_nerf_samples_per_ray=self.config.num_nerf_samples_per_ray, num_proposal_samples_per_ray=self.config.num_proposal_samples_per_ray, num_proposal_network_iterations=self.config.num_proposal_iterations, single_jitter=self.config.use_single_jitter, ) # renderers self.renderer_rgb = RGBRenderer(background_color=self.config.background_color) self.renderer_accumulation = AccumulationRenderer()
self.renderer_depth = DepthRenderer()
12
2023-10-26 04:39:15+00:00
24k
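Reading the row above as a whole: cropped_code ends just before the line stored in next_line (self.renderer_depth = DepthRenderer()), and gold_snippet_index (12) appears to point at the context entry whose snippet defines DepthRenderer, i.e. the retrieved snippet the gold line depends on. The sketch below shows one way a consumer of this dump might stitch those fields into a completion prompt and score a prediction; only the field names come from the rows above, while the build_prompt/is_exact_match helpers and the prompt layout are illustrative assumptions, not something the dataset itself defines.

# Minimal sketch, assuming each row is a dict exposing the fields shown in this dump.
# build_prompt / is_exact_match and the prompt formatting are illustrative only; the
# keys (context, gold_snippet_index, cropped_code, next_line, ...) are the dataset's.
from typing import Dict


def build_prompt(row: Dict) -> str:
    """Assemble a repo-level completion prompt from one row of the dump."""
    # Retrieved snippet the gold line depends on (index 12 -> DepthRenderer in this row).
    gold_ctx = row["context"][row["gold_snippet_index"]]
    context_block = f'# Snippet from {gold_ctx["path"]}\n{gold_ctx["snippet"]}\n\n'
    # The model is asked to continue cropped_code; next_line is the reference answer.
    return (
        f'# Repo: {row["repo_name"]}  File: {row["file_path"]}\n'
        + context_block
        + row["import_statement"]
        + "\n"
        + row["cropped_code"]
    )


def is_exact_match(prediction: str, row: Dict) -> bool:
    """Score a single predicted line against the stored next_line."""
    return prediction.strip() == row["next_line"].strip()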
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 1\n self.device = torch.device(device)\n\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = n_samples\n\n self.data = {}\n self.process_molecules(\"raw_dataset\", n_samples, idx=0)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { "identifier": "ProcessedDoubleQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedDoubleQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=1,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 2\n self.device = torch.device(device)\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = len(self.raw_dataset[\"charges\"])\n\n self.get_subsets()\n self.get_pairs()\n\n self.data = {}\n self.process_molecules(\"frag1_data\", n_samples, idx=0)\n self.process_molecules(\"frag2_data\", n_samples, idx=1)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 2)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]\n\n def get_pairs(self):\n self.frag1_data, self.frag2_data = {}, {}\n frag1_O_idx_1sthalf = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag2_N_idx_1sthalf = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag1_N_idx_2ndhalf = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag2_O_idx_2ndhalf = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n self.frag1_data = {\n key: np.concatenate(\n [\n self.hasO_set[key][frag1_O_idx_1sthalf],\n self.hasN_set[key][frag1_N_idx_2ndhalf],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag2_data = {\n key: np.concatenate(\n [\n self.hasN_set[key][frag2_N_idx_1sthalf],\n self.hasO_set[key][frag2_O_idx_2ndhalf],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }" }, { "identifier": "ProcessedTripleQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedTripleQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = len(self.raw_dataset[\"charges\"])\n\n self.get_subsets()\n self.get_pairs()\n\n self.data = {}\n self.process_molecules(\"frag1_data\", n_samples, idx=0)\n self.process_molecules(\"frag2_data\", n_samples, idx=1)\n 
self.process_molecules(\"frag3_data\", n_samples, idx=2)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]\n\n def get_pairs(self):\n n1 = int(self.n_samples / 3)\n n2 = int(self.n_samples / 3)\n n3 = self.n_samples - n1 - n2\n self.frag1_data, self.frag2_data = {}, {}\n frag1_O_idx_1_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag2_N_idx_1_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag3_F_idx_1_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag1_F_idx_2_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag2_O_idx_2_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag3_N_idx_2_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag1_N_idx_3_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n3,\n replace=True,\n )\n frag2_F_idx_3_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n3,\n replace=True,\n )\n frag3_O_idx_3_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n3,\n replace=True,\n )\n self.frag1_data = {\n key: np.concatenate(\n [\n self.hasO_set[key][frag1_O_idx_1_3],\n self.hasF_set[key][frag1_F_idx_2_3],\n self.hasN_set[key][frag1_N_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag2_data = {\n key: np.concatenate(\n [\n self.hasN_set[key][frag2_N_idx_1_3],\n self.hasO_set[key][frag2_O_idx_2_3],\n self.hasF_set[key][frag2_F_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag3_data = {\n key: np.concatenate(\n [\n self.hasF_set[key][frag3_F_idx_1_3],\n self.hasN_set[key][frag3_N_idx_2_3],\n self.hasO_set[key][frag3_O_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }" }, { "identifier": "ProcessedTS1x", "path": "oa_reactdiff/dataset/transition1x.py", "snippet": "class ProcessedTS1x(BaseDataset):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n single_frag_only=True,\n swapping_react_prod=False,\n append_frag=False,\n reflection=False,\n use_by_ind=False,\n only_ts=False,\n confidence_model=False,\n position_key=\"positions\",\n ediff=None,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n if confidence_model:\n use_by_ind = False\n if remove_h:\n print(\"remove_h is ignored because it is not reasonble for TS.\")\n if single_frag_only:\n single_frag_inds = np.where(\n np.array(self.raw_dataset[\"single_fragment\"]) == 1\n )[0]\n else:\n single_frag_inds = np.array(range(len(self.raw_dataset[\"single_fragment\"])))\n if use_by_ind:\n use_inds = self.raw_dataset[\"use_ind\"]\n else:\n use_inds = range(len(self.raw_dataset[\"single_fragment\"]))\n single_frag_inds = list(set(single_frag_inds).intersection(set(use_inds)))\n\n data_duplicated = copy.deepcopy(self.raw_dataset)\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in data_duplicated[k].items():\n self.raw_dataset[k][v] = [val[ii] for ii in single_frag_inds]\n if swapping_react_prod:\n mapped_val = data_duplicated[mapped_k][v]\n self.raw_dataset[k][v] += [\n mapped_val[ii] for ii in single_frag_inds\n ]\n if reflection:\n for k, mapped_k in FRAG_MAPPING.items():\n for 
v, val in self.raw_dataset[k].items():\n if v in [\"wB97x_6-31G(d).forces\", position_key]:\n self.raw_dataset[k][v] += [reflect_z(_val) for _val in val]\n else:\n self.raw_dataset[k][v] += val\n\n self.reactant = self.raw_dataset[\"reactant\"]\n self.transition_state = self.raw_dataset[\"transition_state\"]\n self.product = self.raw_dataset[\"product\"]\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.reactant[\"charges\"])\n self.n_samples = len(self.reactant[\"charges\"])\n\n self.data = {}\n repeat = 2 if swapping_react_prod else 1\n if confidence_model:\n self.data[\"target\"] = torch.tensor(\n self.raw_dataset[\"target\"] * repeat\n ).unsqueeze(1)\n self.data[\"rmsd\"] = torch.tensor(\n self.raw_dataset[\"rmsd\"] * repeat\n ).unsqueeze(1)\n if ediff is not None:\n self.data[\"ediff\"] = torch.tensor(\n self.raw_dataset[ediff][\"ediff\"] * repeat\n ).unsqueeze(1)\n if not only_ts:\n if not append_frag:\n self.process_molecules(\n \"reactant\", n_samples, idx=0, position_key=position_key\n )\n self.process_molecules(\"transition_state\", n_samples, idx=1)\n self.process_molecules(\n \"product\", n_samples, idx=2, position_key=position_key\n )\n else:\n self.process_molecules(\n \"reactant\",\n n_samples,\n idx=0,\n append_charge=0,\n position_key=position_key,\n )\n self.process_molecules(\n \"transition_state\", n_samples, idx=1, append_charge=1\n )\n self.process_molecules(\n \"product\",\n n_samples,\n idx=2,\n append_charge=0,\n position_key=position_key,\n )\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n else:\n if not append_frag:\n self.process_molecules(\"transition_state\", n_samples, idx=0)\n else:\n self.process_molecules(\n \"transition_state\", n_samples, idx=0, append_charge=1\n )\n # for idx in range(2):\n # self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { "identifier": "EGNNDynamics", "path": "oa_reactdiff/dynamics/egnn_dynamics.py", "snippet": "class EGNNDynamics(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n update_pocket_coords: bool = True,\n condition_time: bool = True,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n ) -> None:\n r\"\"\"Base dynamics class set up for denoising process.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. 
Defaults to None.\n \"\"\"\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n def forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tuple[List[Tensor], Tensor]:\n r\"\"\"predict noise /mu.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tuple[List[Tensor], Tensor]: updated pos-h and edge attributes\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n h_final, pos_final, edge_attr_final = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n )\n vel = pos_final - pos\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in pos, resetting EGNN output to randn.\")\n vel = torch.randn_like(vel)\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in h, resetting EGNN output to randn.\")\n h_final = torch.randn_like(h_final)\n\n h_final = h_final[:, :-condition_dim]\n\n frag_index = self.compute_frag_index(n_frag_switch)\n xh_final = [\n torch.cat(\n [\n self.remove_mean_batch(\n vel[frag_index[ii] : frag_index[ii + 1]],\n combined_mask[frag_index[ii] : frag_index[ii + 1]],\n ),\n self.decoders[ii](h_final[frag_index[ii] : frag_index[ii + 1]]),\n ],\n dim=-1,\n )\n for ii, name in enumerate(self.fragment_names)\n ]\n\n # xh_final = self.enpose_pbc(xh_final)\n\n if edge_attr_final is None or edge_attr_final.size(1) <= max(1, self.dist_dim):\n edge_attr_final = None\n else:\n edge_attr_final = self.edge_decoder(edge_attr_final)\n return xh_final, edge_attr_final\n\n @staticmethod\n def enpose_pbc(xh: List[Tensor], magnitude=10.0) -> List[Tensor]:\n xrange = magnitude * 2\n xh = [torch.remainder(_xh + magnitude, xrange) - magnitude for _xh in xh]\n return xh\n\n @staticmethod\n def compute_frag_index(n_frag_switch: 
Tensor) -> np.ndarray:\n counts = [\n torch.where(n_frag_switch == ii)[0].numel()\n for ii in torch.unique(n_frag_switch)\n ]\n return np.concatenate([np.array([0]), np.cumsum(counts)])\n\n @torch.no_grad()\n def adjust_edge_attr_on_new_eij(\n self,\n edge_index: Tensor,\n edge_attr: Tensor,\n edge_index_new: Tensor,\n ) -> Tensor:\n r\"\"\"Get ready new edge attributes (e_ij) given old {ij, e_ij} and new {ij}\n\n Args:\n edge_index (Tensor): ij\n edge_attr (Tensor): e_ij\n edge_index_new (Tensor): new ij\n\n Raises:\n ValueError: finding multiple entries for the same ij pair\n\n Returns:\n Tensor: new e_ij\n \"\"\"\n edge_index_T = torch.transpose(edge_index, 1, 0)\n edge_index_new_T = torch.transpose(edge_index_new, 1, 0)\n\n edge_attr_new = []\n for _ind, ij in enumerate(edge_index_new_T):\n ind = torch.where((ij == edge_index_T).all(dim=1))[0]\n if ind.size(0) > 1:\n raise ValueError(f\"ind should only be 0 or 1, getting {ind}\")\n\n if ind.size(0) == 0:\n self.create_new_edge_attr(\n ind_new=_ind,\n ij_new=ij,\n edge_index_new_T=edge_index_new_T,\n edge_attr_new=edge_attr_new,\n edge_attr=edge_attr,\n )\n else:\n edge_attr_new.append(edge_attr[ind.item()].detach())\n return torch.stack(edge_attr_new, dim=0)\n\n @staticmethod\n def init_edge_attr(sample_edge_attr):\n r\"\"\"initialize edge attributes.\"\"\"\n return torch.rand_like(sample_edge_attr)\n\n def create_new_edge_attr(\n self,\n ind_new: Tensor,\n ij_new: Tensor,\n edge_index_new_T: Tensor,\n edge_attr_new: List[Tensor],\n edge_attr: Tensor,\n ) -> List[Tensor]:\n r\"\"\"Create new edge attrbution for ij that is not present in old connections\n\n Args:\n ind_new (Tensor): natural index of new ij\n ij_new (Tensor): new ij\n edge_index_new_T (Tensor): new edge indexes, [n_edge, 2]\n edge_attr_new (List[Tensor]): list of new edge attributes\n edge_attr (Tensor): old edge attributes\n\n Raises:\n ValueError: not ji found for ij in new indexes\n\n Returns:\n List[Tensor]: list of new edge attributes\n \"\"\"\n ij_new_reverse = ij_new[torch.tensor([1, 0])]\n ind_new_reverse = torch.where((ij_new_reverse == edge_index_new_T).all(dim=1))[\n 0\n ]\n print(ind_new_reverse)\n if ind_new_reverse.size(0) == 0:\n raise ValueError(f\"should always find a reverse ind.\")\n # print(ij_new, ind_new, ind_new_reverse)\n if ind_new_reverse.item() >= ind_new:\n edge_attr_new.append(self.init_edge_attr(edge_attr[0]))\n else:\n edge_attr_new.append(edge_attr_new[ind_new_reverse.item()])\n return edge_attr_new\n\n @staticmethod\n def remove_mean_batch(x, indices):\n mean = scatter_mean(x, indices, dim=0)\n x = x - mean[indices]\n return x" }, { "identifier": "Confidence", "path": "oa_reactdiff/dynamics/confidence.py", "snippet": "class Confidence(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n **kwargs,\n ) -> None:\n r\"\"\"Confindence score for generated samples.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position 
vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. Defaults to None.\n \"\"\"\n model_config.update({\"for_conf\": True})\n update_pocket_coords = True\n condition_time = (True,)\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n hidden_channels = model_config[\"hidden_channels\"]\n self.readout = GatedMLP(\n in_dim=hidden_channels,\n out_dims=[hidden_channels, hidden_channels, 1],\n activation=\"swish\",\n bias=True,\n last_layer_no_activation=True,\n )\n\n def _forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tensor:\n r\"\"\"predict confidence.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tensor: binary probability of confidence fo each graph.\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n node_features = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n ) # (n_node, n_hidden)\n\n graph_features = scatter_mean(\n node_features,\n index=combined_mask,\n dim=0,\n ) # (n_system, n_hidden)\n conf = self.readout(graph_features)\n return conf.squeeze()\n\n def forward(\n self,\n representations: List[Dict],\n conditions: Tensor,\n ):\n masks = [repre[\"mask\"] for repre in representations]\n combined_mask = torch.cat(masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n fragments_nodes = [repr[\"size\"] for repr in representations]\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n xh = [\n torch.cat(\n 
[repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n\n pred = self._forward(\n xh=xh,\n edge_index=edge_index,\n t=torch.tensor([0]),\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None,\n )\n return pred" }, { "identifier": "DiffSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class DiffSchedule(nn.Module):\n def __init__(self, gamma_module: nn.Module, norm_values: Tuple[float]) -> None:\n super().__init__()\n self.gamma_module = gamma_module\n self.norm_values = norm_values\n self.check_issues_norm_values()\n\n @staticmethod\n def inflate_batch_array(array, target):\n r\"\"\"\n Inflates the batch array (array) with only a single axis\n (i.e. shape = (batch_size,), or possibly more empty axes\n (i.e. shape (batch_size, 1, ..., 1)) to match the target shape.\n \"\"\"\n target_shape = (array.size(0),) + (1,) * (len(target.size()) - 1)\n return array.view(target_shape)\n\n def sigma(self, gamma, target_tensor):\n r\"\"\"Computes sigma given gamma.\"\"\"\n return self.inflate_batch_array(torch.sqrt(torch.sigmoid(gamma)), target_tensor)\n\n def alpha(self, gamma, target_tensor):\n r\"\"\"Computes alpha given gamma.\"\"\"\n return self.inflate_batch_array(\n torch.sqrt(torch.sigmoid(-gamma)), target_tensor\n )\n\n @staticmethod\n def SNR(gamma):\n r\"\"\"Computes signal to noise ratio (alpha^2/sigma^2) given gamma.\"\"\"\n return torch.exp(-gamma)\n\n def sigma_and_alpha_t_given_s(\n self, gamma_t: Tensor, gamma_s: Tensor, target_tensor: Tensor\n ) -> tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n Computes sigma t given s, using gamma_t and gamma_s. Used during sampling.\n These are defined as:\n alpha t given s = alpha t / alpha s,\n sigma t given s = sqrt(1 - (alpha t given s) ^2 ).\n \"\"\"\n sigma2_t_given_s = self.inflate_batch_array(\n -torch.expm1(F.softplus(gamma_s) - F.softplus(gamma_t)), target_tensor\n )\n\n # alpha_t_given_s = alpha_t / alpha_s\n log_alpha2_t = F.logsigmoid(-gamma_t)\n log_alpha2_s = F.logsigmoid(-gamma_s)\n log_alpha2_t_given_s = log_alpha2_t - log_alpha2_s\n\n alpha_t_given_s = torch.exp(0.5 * log_alpha2_t_given_s)\n alpha_t_given_s = self.inflate_batch_array(alpha_t_given_s, target_tensor)\n\n sigma_t_given_s = torch.sqrt(sigma2_t_given_s)\n\n return sigma2_t_given_s, sigma_t_given_s, alpha_t_given_s\n\n def check_issues_norm_values(self, num_stdevs=8):\n zeros = torch.zeros((1, 1))\n gamma_0 = self.gamma_module(zeros)\n sigma_0 = self.sigma(gamma_0, target_tensor=zeros).item()\n\n # Checked if 1 / norm_value is still larger than 10 * standard\n # deviation.\n norm_value = self.norm_values[1]\n\n if sigma_0 * num_stdevs > 1.0 / norm_value:\n raise ValueError(\n f\"Value for normalization value {norm_value} probably too \"\n f\"large with sigma_0 {sigma_0:.5f} and \"\n f\"1 / norm_value = {1. / norm_value}\"\n )" }, { "identifier": "PredefinedNoiseSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class PredefinedNoiseSchedule(nn.Module):\n r\"\"\"\n Predefined noise schedule. 
Essentially creates a lookup array for predefined\n (non-learned) noise schedules.\n \"\"\"\n\n def __init__(\n self,\n noise_schedule: str,\n timesteps: int,\n precision: float,\n ):\n super().__init__()\n self.timesteps = timesteps\n\n if \"cosine\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) <= 2\n power = 1 if len(splits) == 1 else float(splits[1])\n alphas2 = cosine_beta_schedule(timesteps, raise_to_power=power)\n elif \"polynomial\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 2\n power = float(splits[1])\n alphas2 = polynomial_schedule(timesteps, s=precision, power=power)\n elif \"csin\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 4\n start, end, tau = float(splits[1]), float(splits[2]), float(splits[3])\n alphas2 = ccosine_schedule(timesteps, start=start, end=end, tau=tau)\n elif \"linear\" in noise_schedule:\n alphas2 = linear_schedule(timesteps)\n else:\n raise ValueError(noise_schedule)\n\n # print(\"alphas2\", alphas2)\n\n sigmas2 = 1 - alphas2\n\n log_alphas2 = np.log(alphas2)\n log_sigmas2 = np.log(sigmas2)\n\n log_alphas2_to_sigmas2 = log_alphas2 - log_sigmas2\n\n # print(\"gamma\", -log_alphas2_to_sigmas2)\n\n self.gamma = torch.nn.Parameter(\n torch.from_numpy(-log_alphas2_to_sigmas2).float(), requires_grad=False\n )\n\n def forward(self, t):\n t_int = torch.round(t * self.timesteps).long()\n return self.gamma[t_int]" }, { "identifier": "Normalizer", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "class Normalizer(nn.Module):\n def __init__(\n self,\n norm_values: Tuple = (1.0, 1.0, 1.0),\n norm_biases: Tuple = (0.0, 0.0, 0.0),\n pos_dim: int = 3,\n ) -> None:\n super().__init__()\n self.norm_values = norm_values\n self.norm_biases = norm_biases\n self.pos_dim = pos_dim\n\n def normalize(self, representations: List[Dict]) -> List[Dict]:\n for ii in range(len(representations)):\n for jj, feature_type in enumerate(FEATURE_MAPPING):\n representations[ii][feature_type] = (\n representations[ii][feature_type] - self.norm_biases[jj]\n ) / self.norm_values[jj]\n return representations\n\n def unnormalize(self, x: Tensor, ind: int) -> Tensor:\n return x * self.norm_values[ind] + self.norm_biases[ind]\n\n def unnormalize_z(self, z_combined: List[Tensor]) -> List[Tensor]:\n for ii in range(len(z_combined)):\n z_combined[ii][:, : self.pos_dim] = self.unnormalize(\n z_combined[ii][:, : self.pos_dim], 0\n )\n z_combined[ii][:, self.pos_dim : -1] = self.unnormalize(\n z_combined[ii][:, self.pos_dim : -1], 1\n )\n z_combined[ii][:, -1:] = self.unnormalize(z_combined[ii][:, -1:], 2)\n return z_combined" }, { "identifier": "FEATURE_MAPPING", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "FEATURE_MAPPING = [\"pos\", \"one_hot\", \"charge\"]" }, { "identifier": "EnVariationalDiffusion", "path": "oa_reactdiff/diffusion/en_diffusion.py", "snippet": "class EnVariationalDiffusion(nn.Module):\n \"\"\"\n The E(n) Diffusion Module.\n \"\"\"\n\n def __init__(\n self,\n dynamics: EGNNDynamics,\n schdule: DiffSchedule,\n normalizer: Normalizer,\n size_histogram: Optional[Dict] = None,\n loss_type: str = \"l2\",\n pos_only: bool = False,\n fixed_idx: Optional[List] = None,\n ):\n super().__init__()\n assert loss_type in {\"vlb\", \"l2\"}\n\n self.dynamics = dynamics\n self.schedule = schdule\n self.normalizer = normalizer\n self.size_histogram = size_histogram\n self.loss_type = loss_type\n self.pos_only = pos_only\n self.fixed_idx = fixed_idx or []\n\n self.pos_dim = 
dynamics.pos_dim\n self.node_nfs = dynamics.node_nfs\n self.fragment_names = dynamics.fragment_names\n self.T = schdule.gamma_module.timesteps\n self.norm_values = normalizer.norm_values\n self.norm_biases = normalizer.norm_biases\n\n # ------ FORWARD PASS ------\n\n def forward(\n self,\n representations: List[Dict],\n conditions: Tensor,\n return_pred: bool = False,\n ):\n r\"\"\"\n Computes the loss and NLL terms.\n\n #TODO: edge_attr not considered at all\n \"\"\"\n num_sample = representations[0][\"size\"].size(0)\n n_nodes = torch.stack(\n [repr[\"size\"] for repr in representations],\n dim=0,\n ).sum(dim=0)\n device = representations[0][\"pos\"].device\n masks = [repre[\"mask\"] for repre in representations]\n combined_mask = torch.cat(masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n fragments_nodes = [repr[\"size\"] for repr in representations]\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n # Normalize data, take into account volume change in x.\n representations = self.normalizer.normalize(representations)\n\n # Likelihood change due to normalization\n delta_log_px = self.delta_log_px(n_nodes.sum())\n\n # Sample a timestep t for each example in batch\n # At evaluation time, loss_0 will be computed separately to decrease\n # variance in the estimator (costs two forward passes)\n lowest_t = 0 if self.training else 1\n t_int = torch.randint(\n lowest_t, self.T + 1, size=(num_sample, 1), device=device\n ).float()\n s_int = t_int - 1 # previous timestep\n\n # Masks: important to compute log p(x | z0).\n t_is_zero = (t_int == 0).float()\n t_is_not_zero = 1 - t_is_zero\n\n # Normalize t to [0, 1]. Note that the negative\n # step of s will never be used, since then p(x | z0) is computed.\n s = s_int / self.T\n t = t_int / self.T\n\n # Compute gamma_s and gamma_t via the network.\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s), representations[0][\"pos\"]\n )\n gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t), representations[0][\"pos\"]\n )\n\n # Concatenate x, and h[categorical].\n xh = [\n torch.cat(\n [repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n\n # Find noised representation\n z_t, eps_xh = self.noised_representation(xh, masks, gamma_t)\n\n # Neural net prediction.\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=z_t,\n edge_index=edge_index,\n t=t,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n if return_pred:\n return eps_xh, net_eps_xh\n\n # TODO: LJ term not implemented\n # xh_lig_hat = self.xh_given_zt_and_epsilon(z_t_lig, net_out_lig, gamma_t,\n # ligand['mask'])\n if self.pos_only:\n for ii in range(len(masks)):\n net_eps_xh[ii][:, self.pos_dim :] = torch.zeros_like(\n net_eps_xh[ii][:, self.pos_dim :],\n device=device,\n )\n # Compute the L2 error.\n error_t: List[Tensor] = [\n utils.sum_except_batch(\n (eps_xh[ii] - net_eps_xh[ii]) ** 2,\n masks[ii],\n dim_size=num_sample,\n )\n for ii in range(len(masks))\n ] # TODO: no edge_attr contribution\n\n # Compute weighting with SNR: (1 - SNR(s-t)) for epsilon parametrization\n SNR_weight = (1 - self.schedule.SNR(gamma_s - gamma_t)).squeeze(1)\n assert error_t[0].size() == SNR_weight.size()\n\n # The _constants_ depending on sigma_0 from the\n # cross entropy term E_q(z0 | x) [log p(x | z0)].\n neg_log_constants = -self.log_constants_p_x_given_z0(\n n_nodes=n_nodes, 
device=device\n )\n\n # The KL between q(zT | x) and p(zT) = Normal(0, 1).\n # Should be close to zero.\n # kl_prior = self.kl_prior_with_pocket(\n # xh_lig, xh_pocket, ligand['mask'], pocket['mask'],\n # ligand['size'] + pocket['size'])\n # TODO: approximate KL prior with zero now, which should not influence training.\n kl_prior = torch.zeros_like(neg_log_constants)\n\n if self.training:\n # Computes the L_0 term (even if gamma_t is not actually gamma_0)\n # and this will later be selected via masking.\n log_p_h_given_z0 = self.log_pxh_given_z0_without_constants(\n representations=representations,\n z_t=z_t,\n eps_xh=eps_xh,\n net_eps_xh=net_eps_xh,\n gamma_t=gamma_t,\n epsilon=1e-10,\n )\n loss_0_x = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[0]\n ]\n loss_0_cat = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[1]\n ]\n loss_0_charge = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[2]\n ]\n\n # apply t_is_zero mask\n error_t = [_error_t * t_is_not_zero.squeeze() for _error_t in error_t]\n\n else:\n # Compute noise values for t = 0.\n t_zeros = torch.zeros_like(s)\n gamma_0 = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_zeros), representations[0][\"pos\"]\n )\n\n # Sample z_0 given x, h for timestep t, from q(z_t | x, h)\n z_0, eps_0_xh = self.noised_representation(xh, masks, gamma_0)\n net_eps_0_xh, net_eps_0_edge_attr = self.dynamics(\n xh=z_0,\n edge_index=edge_index,\n t=t_zeros,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n log_p_h_given_z0 = self.log_pxh_given_z0_without_constants(\n representations=representations,\n z_t=z_0,\n eps_xh=eps_0_xh,\n net_eps_xh=net_eps_0_xh,\n gamma_t=gamma_0,\n epsilon=1e-10,\n )\n loss_0_x = [-_log_p_fragment for _log_p_fragment in log_p_h_given_z0[0]]\n loss_0_cat = [-_log_p_fragment for _log_p_fragment in log_p_h_given_z0[1]]\n loss_0_charge = [\n -_log_p_fragment for _log_p_fragment in log_p_h_given_z0[2]\n ]\n\n loss_terms = {\n \"delta_log_px\": delta_log_px,\n \"error_t\": error_t,\n \"SNR_weight\": SNR_weight,\n \"loss_0_x\": loss_0_x,\n \"loss_0_cat\": loss_0_cat,\n \"loss_0_charge\": loss_0_charge,\n \"neg_log_constants\": neg_log_constants,\n \"kl_prior\": kl_prior,\n \"log_pN\": torch.zeros_like(kl_prior),\n \"t_int\": t_int.squeeze(),\n \"net_eps_xh\": net_eps_xh,\n \"eps_xh\": eps_xh,\n }\n return loss_terms\n\n def delta_log_px(self, num_nodes):\n return -self.subspace_dimensionality(num_nodes) * np.log(self.norm_values[0])\n\n def subspace_dimensionality(self, input_size):\n r\"\"\"\n Compute the dimensionality on translation-invariant linear subspace\n where distributions on x are defined.\n \"\"\"\n return (input_size - 1) * self.pos_dim\n\n def noised_representation(\n self,\n xh: List[Tensor],\n masks: List[Tensor],\n gamma_t: Tensor,\n ) -> Tuple[List[Tensor], List[Tensor]]:\n # Compute alpha_t and sigma_t from gamma.\n alpha_t = self.schedule.alpha(gamma_t, xh[0])\n sigma_t = self.schedule.sigma(gamma_t, xh[0])\n\n # Sample zt ~ Normal(alpha_t x, sigma_t)\n eps_xh = self.sample_combined_position_feature_noise(masks)\n\n # Sample z_t given x, h for timestep t, from q(z_t | x, h)\n z_t = [\n alpha_t[masks[ii]] * xh[ii] + sigma_t[masks[ii]] * eps_xh[ii]\n for ii in range(len(masks))\n ]\n\n return z_t, eps_xh\n\n def sample_combined_position_feature_noise(\n self,\n masks: List[Tensor],\n ) -> 
List[Tensor]:\n r\"\"\"\n Samples mean-centered normal noise for z_x, and standard normal noise for z_h.\n Note that we only need to put the center of gravity of *each fragment* to the origin.\n \"\"\"\n eps_xh = []\n for ii, mask in enumerate(masks):\n _eps_x = utils.sample_center_gravity_zero_gaussian_batch(\n size=(len(mask), self.pos_dim),\n indices=[mask],\n )\n _eps_h = utils.sample_gaussian(\n size=(len(mask), self.node_nfs[ii] - self.pos_dim),\n device=mask.device,\n )\n if self.pos_only:\n _eps_h = torch.zeros_like(_eps_h, device=mask.device)\n eps_xh.append(torch.cat([_eps_x, _eps_h], dim=1))\n for idx in self.fixed_idx:\n eps_xh[idx] = torch.zeros_like(eps_xh[idx], device=mask.device)\n return eps_xh\n\n def log_constants_p_x_given_z0(self, n_nodes, device):\n r\"\"\"Computes p(x|z0).\"\"\"\n\n batch_size = len(n_nodes)\n degrees_of_freedom_x = self.subspace_dimensionality(n_nodes).to(device)\n\n zeros = torch.zeros((batch_size, 1), device=device)\n gamma_0 = self.schedule.gamma_module(zeros)\n\n # Recall that sigma_x = sqrt(sigma_0^2 / alpha_0^2) = SNR(-0.5 gamma_0).\n log_sigma_x = 0.5 * gamma_0.view(batch_size)\n return degrees_of_freedom_x * (-log_sigma_x - 0.5 * np.log(2 * np.pi))\n\n def kl_prior(self):\n return NotImplementedError\n\n @staticmethod\n def gaussian_KL(q_mu_minus_p_mu_squared, q_sigma, p_sigma, d):\n \"\"\"Computes the KL distance between two normal distributions.\n Args:\n q_mu_minus_p_mu_squared: Squared difference between mean of\n distribution q and distribution p: ||mu_q - mu_p||^2\n q_sigma: Standard deviation of distribution q.\n p_sigma: Standard deviation of distribution p.\n d: dimension\n Returns:\n The KL distance\n \"\"\"\n return (\n d * torch.log(p_sigma / q_sigma)\n + 0.5 * (d * q_sigma**2 + q_mu_minus_p_mu_squared) / (p_sigma**2)\n - 0.5 * d\n )\n\n def log_pxh_given_z0_without_constants(\n self,\n representations: List[Dict],\n z_t: List[Tensor],\n eps_xh: List[Tensor],\n net_eps_xh: List[Tensor],\n gamma_t: Tensor,\n epsilon: float = 1e-10,\n ) -> List[List[Tensor]]:\n # Compute sigma_0 and rescale to the integer scale of the data.\n # for pos\n log_p_x_given_z0_without_constants = [\n -0.5\n * (\n utils.sum_except_batch(\n (eps_xh[ii][:, : self.pos_dim] - net_eps_xh[ii][:, : self.pos_dim])\n ** 2,\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n )\n for ii in range(len(representations))\n ]\n\n # only keep first several elements\n z_t = [_z_t[:, : 3 + 5 + 1] for _z_t in z_t]\n for ii, repr in enumerate(representations):\n representations[ii][\"charge\"] = representations[ii][\"charge\"][:, :1]\n # for ohe of atom types\n sigma_0 = self.schedule.sigma(gamma_t, target_tensor=z_t[0])\n sigma_0_cat = sigma_0 * self.normalizer.norm_values[1]\n atoms = [\n self.normalizer.unnormalize(repr[\"one_hot\"], ind=1)\n for repr in representations\n ]\n est_atoms = [\n self.normalizer.unnormalize(_z_t[:, self.pos_dim : -1], ind=1)\n for _z_t in z_t\n ]\n centered_atoms = [_est_atoms - 1 for _est_atoms in est_atoms]\n log_ph_cat_proportionals = [\n torch.log(\n utils.cdf_standard_gaussian(\n (centered_atoms[ii] + 0.5)\n / sigma_0_cat[representations[ii][\"mask\"]]\n )\n - utils.cdf_standard_gaussian(\n (centered_atoms[ii] - 0.5)\n / sigma_0_cat[representations[ii][\"mask\"]]\n )\n + epsilon\n )\n for ii in range(len(representations))\n ]\n log_probabilities = [\n _log_ph_cat_proportionals\n - torch.logsumexp(\n _log_ph_cat_proportionals,\n dim=1,\n keepdim=True,\n )\n for _log_ph_cat_proportionals in 
log_ph_cat_proportionals\n ]\n log_p_hcat_given_z0 = [\n utils.sum_except_batch(\n log_probabilities[ii] * atoms[ii],\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n for ii in range(len(representations))\n ]\n\n # for atom charge\n sigma_0_charge = sigma_0 * self.normalizer.norm_values[2]\n charges = [\n self.normalizer.unnormalize(repr[\"charge\"], ind=2)\n for repr in representations\n ]\n est_charges = [\n self.normalizer.unnormalize(_z_t[:, -1:], ind=2).long() for _z_t in z_t\n ]\n for ii in range(len(representations)):\n assert charges[ii].size() == est_charges[ii].size()\n centered_charges = [\n charges[ii] - est_charges[ii] for ii in range(len(representations))\n ]\n log_ph_charge_proportionals = [\n torch.log(\n utils.cdf_standard_gaussian(\n (centered_charges[ii] + 0.5)\n / sigma_0_charge[representations[ii][\"mask\"]]\n )\n - utils.cdf_standard_gaussian(\n (centered_charges[ii] - 0.5)\n / sigma_0_charge[representations[ii][\"mask\"]]\n )\n + epsilon\n )\n for ii in range(len(representations))\n ]\n log_p_hcharge_given_z0 = [\n utils.sum_except_batch(\n log_ph_charge_proportionals[ii],\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n for ii in range(len(representations))\n ]\n\n log_p_h_given_z0 = [\n log_p_x_given_z0_without_constants,\n log_p_hcat_given_z0,\n log_p_hcharge_given_z0,\n ]\n return log_p_h_given_z0\n\n # ------ INVERSE PASS ------\n\n @torch.no_grad()\n def sample(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n timesteps: Optional[int] = None,\n h0: Optional[List[Tensor]] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert h0 is not None if self.pos_only else True\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n # Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1.\n for s in reversed(range(0, timesteps)):\n s_array = torch.full((n_samples, 1), fill_value=s, device=zt_xh[0].device)\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n # print(s, zt_xh)\n\n zt_xh = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n # save frame\n if (s * return_frames) % timesteps == 0:\n idx = (s * return_frames) // timesteps\n out_samples[idx] = 
self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n def sample_p_zs_given_zt(\n self,\n s: Tensor,\n t: Tensor,\n zt_xh: List[Tensor],\n edge_index: Tensor,\n n_frag_switch: Tensor,\n masks: List[Tensor],\n conditions: Optional[Tensor] = None,\n fix_noise: bool = False,\n ):\n \"\"\"Samples from zs ~ p(zs | zt). Only used during sampling.\"\"\"\n gamma_s = self.schedule.gamma_module(s)\n gamma_t = self.schedule.gamma_module(t)\n\n (\n sigma2_t_given_s,\n sigma_t_given_s,\n alpha_t_given_s,\n ) = self.schedule.sigma_and_alpha_t_given_s(gamma_t, gamma_s, zt_xh[0])\n\n sigma_s = self.schedule.sigma(gamma_s, target_tensor=zt_xh[0])\n sigma_t = self.schedule.sigma(gamma_t, target_tensor=zt_xh[0])\n\n # Neural net prediction.\n combined_mask = torch.cat(masks)\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=zt_xh,\n edge_index=edge_index,\n t=t,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_net_eps_xh[:, : self.pos_dim] for _net_eps_xh in net_eps_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n # Note: mu_{t->s} = 1 / alpha_{t|s} z_t - sigma_{t|s}^2 / sigma_t / alpha_{t|s} epsilon\n # follows from the definition of mu_{t->s} and Equ. 
(7) in the EDM paper\n mu = [\n zt_xh[ii] / alpha_t_given_s[masks[ii]]\n - net_eps_xh[ii] * (sigma2_t_given_s / alpha_t_given_s / sigma_t)[masks[ii]]\n for ii in range(len(zt_xh))\n ]\n\n # Compute sigma for p(zs | zt).\n sigma = sigma_t_given_s * sigma_s / sigma_t\n\n # Sample zs given the paramters derived from zt.\n zs_xh = self.sample_normal(mu=mu, sigma=sigma, masks=masks, fix_noise=fix_noise)\n\n # Project down to avoid numerical runaway of the center of gravity.\n for ii in range(len(masks)):\n zs_xh[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n zs_xh[ii][:, : self.pos_dim],\n masks[ii],\n )\n return zs_xh\n\n def sample_normal(\n self,\n mu: List[Tensor],\n sigma: Tensor,\n masks: List[Tensor],\n fix_noise: bool = False,\n ) -> List[Tensor]:\n r\"\"\"Samples from a Normal distribution.\"\"\"\n if fix_noise:\n # bs = 1 if fix_noise else mu.size(0)\n raise NotImplementedError(\"fix_noise option isn't implemented yet\")\n eps_xh = self.sample_combined_position_feature_noise(masks=masks)\n zs_xh = [mu[ii] + sigma[masks[ii]] * eps_xh[ii] for ii in range(len(masks))]\n return zs_xh\n\n def sample_p_xh_given_z0(\n self,\n z0_xh: List[Tensor],\n edge_index: Tensor,\n n_frag_switch: Tensor,\n masks: List[Tensor],\n batch_size: int,\n conditions: Optional[Tensor] = None,\n fix_noise: bool = False,\n ) -> Tuple[List[Tensor]]:\n \"\"\"Samples x ~ p(x|z0).\"\"\"\n t_zeros = torch.zeros(size=(batch_size, 1), device=z0_xh[0].device)\n gamma_0 = self.schedule.gamma_module(t_zeros)\n # Computes sqrt(sigma_0^2 / alpha_0^2)\n sigma_x = self.schedule.SNR(-0.5 * gamma_0)\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=z0_xh,\n edge_index=edge_index,\n t=t_zeros,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=torch.cat(masks),\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n # Compute mu for p(zs | zt).\n mu_x = self.compute_x_pred(\n net_eps_xh=net_eps_xh,\n zt_xh=z0_xh,\n gamma_t=gamma_0,\n masks=masks,\n )\n x0_xh = self.sample_normal(\n mu=mu_x, sigma=sigma_x, masks=masks, fix_noise=fix_noise\n )\n\n pos_0 = [\n self.normalizer.unnormalize(x0_xh[ii][:, : self.pos_dim], ii)\n for ii in range(len(masks))\n ]\n cat_0 = [\n self.normalizer.unnormalize(x0_xh[ii][:, self.pos_dim : -1], ii)\n for ii in range(len(masks))\n ]\n charge_0 = [\n torch.round(self.normalizer.unnormalize(x0_xh[ii][:, -1:], ii)).long()\n for ii in range(len(masks))\n ]\n\n cat_0 = [\n F.one_hot(torch.argmax(cat_0[ii], dim=1), self.node_nfs[ii] - 4).long()\n for ii in range(len(masks))\n ]\n return pos_0, cat_0, charge_0\n\n def compute_x_pred(\n self,\n net_eps_xh: List[Tensor],\n zt_xh: List[Tensor],\n gamma_t: Tensor,\n masks: List[Tensor],\n ) -> List[Tensor]:\n \"\"\"Commputes x_pred, i.e. the most likely prediction of x.\"\"\"\n sigma_t = self.schedule.sigma(gamma_t, target_tensor=net_eps_xh[0])\n alpha_t = self.schedule.alpha(gamma_t, target_tensor=net_eps_xh[0])\n x_pred = [\n 1.0 / alpha_t[masks[ii]] * (zt_xh[ii] - sigma_t[masks[ii]] * net_eps_xh[ii])\n for ii in range(len(masks))\n ]\n return x_pred\n\n # ------ INPAINT ------\n @torch.no_grad()\n def inpaint(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n resamplings: int = 1,\n jump_length: int = 1,\n timesteps: Optional[int] = None,\n xh_fixed: Optional[List[Tensor]] = None,\n frag_fixed: Optional[List] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. 
Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert len(xh_fixed)\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n h0 = [_xh_fixed[:, self.pos_dim :].long() for _xh_fixed in xh_fixed]\n\n for ii, _ in enumerate(xh_fixed):\n xh_fixed[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n xh_fixed[ii][:, : self.pos_dim],\n fragments_masks[ii],\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_xh_fixed[:, : self.pos_dim] for _xh_fixed in xh_fixed],\n dim=0,\n ),\n combined_mask,\n )\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n schedule = get_repaint_schedule(resamplings, jump_length, timesteps)\n s = timesteps - 1\n for i, n_denoise_steps in enumerate(schedule):\n for j in range(n_denoise_steps):\n s_array = torch.full(\n (n_samples, 1), fill_value=s, device=zt_xh[0].device\n )\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n\n zt_known, _ = self.noised_representation(\n xh_fixed, fragments_masks, gamma_s\n )\n zt_unknown = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n\n if self.pos_only:\n zt_known = [\n torch.cat([zt_known[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n zt_unknown = [\n torch.cat([zt_unknown[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n zt_xh = [\n zt_known[ii] if ii in frag_fixed else zt_unknown[ii]\n for ii in range(len(h0))\n ]\n\n # Noise combined representation, i.e., resample\n if j == n_denoise_steps - 1 and i < len(schedule) - 1:\n # Go back jump_length steps\n t = s + jump_length\n t_array = torch.full(\n (n_samples, 1), fill_value=t, device=zt_xh[0].device\n )\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_array), xh_fixed[0]\n )\n\n zt_xh = self.sample_p_zt_given_zs(\n zt_xh, fragments_masks, gamma_t, gamma_s\n )\n s = t\n\n s = s - 1\n\n # # save frame\n # if (s * return_frames) % timesteps == 0:\n # idx = (s * return_frames) // timesteps\n # out_samples[idx] = self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n 
torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n # ------ INPAINT ------\n @torch.no_grad()\n def inpaint_fixed(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n resamplings: int = 1,\n jump_length: int = 1,\n timesteps: Optional[int] = None,\n xh_fixed: Optional[List[Tensor]] = None,\n frag_fixed: Optional[List] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert len(xh_fixed)\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n h0 = [_xh_fixed[:, self.pos_dim :].long() for _xh_fixed in xh_fixed]\n\n for ii, _ in enumerate(xh_fixed):\n xh_fixed[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n xh_fixed[ii][:, : self.pos_dim],\n fragments_masks[ii],\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_xh_fixed[:, : self.pos_dim] for _xh_fixed in xh_fixed],\n dim=0,\n ),\n combined_mask,\n )\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n schedule = get_repaint_schedule(resamplings, jump_length, timesteps)\n s = timesteps - 1\n for i, n_denoise_steps in enumerate(schedule):\n for j in range(n_denoise_steps):\n s_array = torch.full(\n (n_samples, 1), fill_value=s, device=zt_xh[0].device\n )\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n\n zt_known, _ = self.noised_representation(\n xh_fixed, fragments_masks, gamma_s\n )\n zt_unknown = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n\n if self.pos_only:\n zt_known = [\n torch.cat([zt_known[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n zt_unknown = [\n torch.cat([zt_unknown[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n zt_xh = [\n zt_known[ii] if ii in frag_fixed else zt_unknown[ii]\n for ii in range(len(h0))\n ]\n\n # Noise combined representation, i.e., resample\n if j == n_denoise_steps - 1 and i < len(schedule) - 1:\n # Go back jump_length steps\n t = s + jump_length\n t_array = torch.full(\n (n_samples, 1), fill_value=t, device=zt_xh[0].device\n )\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n 
gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_array), xh_fixed[0]\n )\n\n zt_xh = self.sample_p_zt_given_zs(\n zt_xh, fragments_masks, gamma_t, gamma_s\n )\n s = t\n\n s = s - 1\n\n # # save frame\n # if (s * return_frames) % timesteps == 0:\n # idx = (s * return_frames) // timesteps\n # out_samples[idx] = self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n def sample_p_zt_given_zs(\n self,\n zs: List[Tensor],\n masks: List[Tensor],\n gamma_t: Tensor,\n gamma_s: Tensor,\n fix_noise: bool = False,\n ) -> List[Tensor]:\n (\n sigma2_t_given_s,\n sigma_t_given_s,\n alpha_t_given_s,\n ) = self.schedule.sigma_and_alpha_t_given_s(gamma_t, gamma_s, zs[0])\n\n mu = [alpha_t_given_s[masks[ii]] * zs[ii] for ii in range(len(masks))]\n zt = self.sample_normal(\n mu=mu, sigma=sigma_t_given_s, masks=masks, fix_noise=fix_noise\n )\n\n for ii in range(len(masks)):\n zt[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n zt[ii][:, : self.pos_dim],\n masks[ii],\n )\n return zt" }, { "identifier": "average_over_batch_metrics", "path": "oa_reactdiff/trainer/_metrics.py", "snippet": "def average_over_batch_metrics(batch_metrics: List[Dict], allowed: List = []):\n epoch_metrics = {}\n effective_batch = {}\n for ii, out in enumerate(batch_metrics):\n for k, v in out.items():\n if not (k in allowed or len(allowed) == 0):\n continue\n if ii == 0:\n epoch_metrics[k] = v\n effective_batch[k] = 1\n else:\n if not np.isnan(v):\n epoch_metrics[k] += v\n effective_batch[k] += 1\n for k in epoch_metrics:\n epoch_metrics[k] /= effective_batch[k]\n return epoch_metrics" }, { "identifier": "pretty_print", "path": "oa_reactdiff/trainer/_metrics.py", "snippet": "def pretty_print(epoch, metric_dict, prefix=\"Train\"):\n out = f\"{prefix} epoch {epoch} \"\n for k, v in metric_dict.items():\n out += f\"{k} {v:.2f} \"\n print(out)" }, { "identifier": "batch_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def batch_rmsd(\n fragments_nodes: List[Tensor],\n out_samples: List[Tensor],\n xh: List[Tensor],\n idx: int = 1,\n threshold=0.5,\n):\n rmsds = []\n out_samples_use = out_samples[idx]\n xh_use = xh[idx]\n nodes = fragments_nodes[idx].long().cpu().numpy()\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(nodes):\n end_ind += natoms\n mol1 = xh2pmg(out_samples_use[start_ind:end_ind])\n mol2 = xh2pmg(xh_use[start_ind:end_ind])\n try:\n rmsd = pymatgen_rmsd(mol1, mol2, ignore_chirality=True, threshold=threshold)\n except:\n rmsd = 1.0\n rmsds.append(min(rmsd, 1.0))\n start_ind = end_ind\n return rmsds" } ]
from typing import Dict, List, Optional, Tuple
from pathlib import Path
from torch import nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR
from pytorch_lightning import LightningModule
from torchmetrics.classification import (
    BinaryAccuracy,
    BinaryAUROC,
    BinaryF1Score,
    BinaryPrecision,
    BinaryCohenKappa,
)
from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError
from oa_reactdiff.dataset import (
    ProcessedQM9,
    ProcessedDoubleQM9,
    ProcessedTripleQM9,
    ProcessedTS1x,
)
from oa_reactdiff.dynamics import EGNNDynamics, Confidence
from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule
from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING
from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion
from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print
from oa_reactdiff.analyze.rmsd import batch_rmsd
import torch
import copy
import torch.nn.functional as F
import numpy as np
import pandas as pd
import oa_reactdiff.utils.training_tools as utils
20,055
PROCESS_FUNC = {
    "QM9": ProcessedQM9,
    "DoubleQM9": ProcessedDoubleQM9,
    "TripleQM9": ProcessedTripleQM9,
    "TS1x": ProcessedTS1x,
}
FILE_TYPE = {
    "QM9": ".npz",
    "DoubleQM9": ".npz",
    "TripleQM9": ".npz",
    "TS1x": ".pkl",
}
LR_SCHEDULER = {
    "cos": CosineAnnealingWarmRestarts,
    "step": StepLR,
}


class DDPMModule(LightningModule):
    def __init__(
        self,
        model_config: Dict,
        optimizer_config: Dict,
        training_config: Dict,
        node_nfs: List[int] = [9] * 3,
        edge_nf: int = 4,
        condition_nf: int = 3,
        fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"],
        pos_dim: int = 3,
        update_pocket_coords: bool = True,
        condition_time: bool = True,
        edge_cutoff: Optional[float] = None,
        norm_values: Tuple = (1.0, 1.0, 1.0),
        norm_biases: Tuple = (0.0, 0.0, 0.0),
        noise_schedule: str = "polynomial_2",
        timesteps: int = 1000,
        precision: float = 1e-5,
        loss_type: str = "l2",
        pos_only: bool = False,
        process_type: Optional[str] = None,
        model: nn.Module = None,
        enforce_same_encoding: Optional[List] = None,
        scales: List[float] = [1.0, 1.0, 1.0],
        eval_epochs: int = 20,
        source: Optional[Dict] = None,
        fixed_idx: Optional[List] = None,
    ) -> None:
        super().__init__()
        egnn_dynamics = EGNNDynamics(
            model_config=model_config,
            node_nfs=node_nfs,
            edge_nf=edge_nf,
            condition_nf=condition_nf,
            fragment_names=fragment_names,
            pos_dim=pos_dim,
            update_pocket_coords=update_pocket_coords,
            condition_time=condition_time,
            edge_cutoff=edge_cutoff,
            model=model,
            enforce_same_encoding=enforce_same_encoding,
            source=source,
        )
        normalizer = Normalizer(
            norm_values=norm_values,
            norm_biases=norm_biases,
            pos_dim=pos_dim,
        )
        gamma_module = PredefinedNoiseSchedule(
            noise_schedule=noise_schedule,
            timesteps=timesteps,
            precision=precision,
        )
        schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)
PROCESS_FUNC = {
    "QM9": ProcessedQM9,
    "DoubleQM9": ProcessedDoubleQM9,
    "TripleQM9": ProcessedTripleQM9,
    "TS1x": ProcessedTS1x,
}
FILE_TYPE = {
    "QM9": ".npz",
    "DoubleQM9": ".npz",
    "TripleQM9": ".npz",
    "TS1x": ".pkl",
}
LR_SCHEDULER = {
    "cos": CosineAnnealingWarmRestarts,
    "step": StepLR,
}


class DDPMModule(LightningModule):
    def __init__(
        self,
        model_config: Dict,
        optimizer_config: Dict,
        training_config: Dict,
        node_nfs: List[int] = [9] * 3,
        edge_nf: int = 4,
        condition_nf: int = 3,
        fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"],
        pos_dim: int = 3,
        update_pocket_coords: bool = True,
        condition_time: bool = True,
        edge_cutoff: Optional[float] = None,
        norm_values: Tuple = (1.0, 1.0, 1.0),
        norm_biases: Tuple = (0.0, 0.0, 0.0),
        noise_schedule: str = "polynomial_2",
        timesteps: int = 1000,
        precision: float = 1e-5,
        loss_type: str = "l2",
        pos_only: bool = False,
        process_type: Optional[str] = None,
        model: nn.Module = None,
        enforce_same_encoding: Optional[List] = None,
        scales: List[float] = [1.0, 1.0, 1.0],
        eval_epochs: int = 20,
        source: Optional[Dict] = None,
        fixed_idx: Optional[List] = None,
    ) -> None:
        super().__init__()
        egnn_dynamics = EGNNDynamics(
            model_config=model_config,
            node_nfs=node_nfs,
            edge_nf=edge_nf,
            condition_nf=condition_nf,
            fragment_names=fragment_names,
            pos_dim=pos_dim,
            update_pocket_coords=update_pocket_coords,
            condition_time=condition_time,
            edge_cutoff=edge_cutoff,
            model=model,
            enforce_same_encoding=enforce_same_encoding,
            source=source,
        )
        normalizer = Normalizer(
            norm_values=norm_values,
            norm_biases=norm_biases,
            pos_dim=pos_dim,
        )
        gamma_module = PredefinedNoiseSchedule(
            noise_schedule=noise_schedule,
            timesteps=timesteps,
            precision=precision,
        )
        schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)
self.ddpm = EnVariationalDiffusion(
10
2023-10-30 02:53:38+00:00
24k
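Editor's note on the record above: the cropped_code stops just before the gold next_line (`self.ddpm = EnVariationalDiffusion(`). The short Python sketch below is not part of the dataset record or the source repository; it only illustrates, under the assumption that the oa_reactdiff package referenced in the record's import_statement is installed, how the schedule and normalizer objects built at the end of cropped_code can be constructed and queried. The default hyperparameters (polynomial_2, 1000 timesteps, precision 1e-5, unit norm values) are taken from DDPMModule's signature; the probe at t = 0.5 and the random positions are illustrative choices.

# Minimal sketch, assuming oa_reactdiff is importable in the current environment.
import torch

from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule
from oa_reactdiff.diffusion._normalizer import Normalizer

# Recreate the objects that cropped_code builds right before the gold next line.
gamma_module = PredefinedNoiseSchedule(
    noise_schedule="polynomial_2", timesteps=1000, precision=1e-5
)
schedule = DiffSchedule(gamma_module=gamma_module, norm_values=(1.0, 1.0, 1.0))
normalizer = Normalizer(
    norm_values=(1.0, 1.0, 1.0), norm_biases=(0.0, 0.0, 0.0), pos_dim=3
)

# Query the schedule at normalized time t = 0.5 (step 500 of 1000).
t = torch.tensor([[0.5]])
gamma_t = gamma_module(t)                             # lookup of precomputed gamma
sigma_t = schedule.sigma(gamma_t, target_tensor=t)    # sqrt(sigmoid(gamma_t))
alpha_t = schedule.alpha(gamma_t, target_tensor=t)    # sqrt(sigmoid(-gamma_t))
print(float(gamma_t), float(sigma_t), float(alpha_t), float(alpha_t**2 + sigma_t**2))

# With unit norm values and zero biases, unnormalize is the identity map.
pos = torch.randn(5, 3)
assert torch.allclose(normalizer.unnormalize(pos, ind=0), pos)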
lewandofskee/DiAD
sgn/sgn.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w 
-> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "UNetModel", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param num_classes: if specified (as an int), then this model will be\n class-conditional with `num_classes` classes.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n super().__init__()\n if use_spatial_transformer:\n assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'\n\n if context_dim is not None:\n assert use_spatial_transformer, 'Fool!! 
You forgot to use the spatial transformer for your cross-attention conditioning...'\n from omegaconf.listconfig import ListConfig\n if type(context_dim) == ListConfig:\n context_dim = list(context_dim)\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n if num_heads == -1:\n assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'\n\n if num_head_channels == -1:\n assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n if isinstance(num_res_blocks, int):\n self.num_res_blocks = len(channel_mult) * [num_res_blocks]\n else:\n if len(num_res_blocks) != len(channel_mult):\n raise ValueError(\"provide num_res_blocks either as an int (globally constant) or \"\n \"as a list/tuple (per-level) with the same length as channel_mult\")\n self.num_res_blocks = num_res_blocks\n if disable_self_attentions is not None:\n # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not\n assert len(disable_self_attentions) == len(channel_mult)\n if num_attention_blocks is not None:\n assert len(num_attention_blocks) == len(self.num_res_blocks)\n assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))\n print(f\"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. \"\n f\"This option has LESS priority than attention_resolutions {attention_resolutions}, \"\n f\"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, \"\n f\"attention will still not be set.\")\n\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.num_classes = num_classes\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n self.predict_codebook_ids = n_embed is not None\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n if self.num_classes is not None:\n if isinstance(self.num_classes, int):\n self.label_emb = nn.Embedding(num_classes, time_embed_dim)\n elif self.num_classes == \"continuous\":\n print(\"setting up linear c_adm embedding layer\")\n self.label_emb = nn.Linear(1, time_embed_dim)\n else:\n raise ValueError()\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for nr in range(self.num_res_blocks[level]):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n 
disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(self.num_res_blocks[level] + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or i < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n if level 
and i == self.num_res_blocks[level]:\n out_ch = ch\n # if level == 3:\n # layers.append(Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, scale_guide=True))\n # else:\n # layers.append(Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, scale_guide=False))\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, scale_guide=True)\n )\n ds //= 2\n self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n if self.predict_codebook_ids:\n self.id_predictor = nn.Sequential(\n normalization(ch),\n conv_nd(dims, model_channels, n_embed, 1),\n #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits\n )\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param context: conditioning plugged in via crossattn\n :param y: an [N] Tensor of labels, if class-conditional.\n :return: an [N x C x ...] 
Tensor of outputs.\n \"\"\"\n assert (y is not None) == (\n self.num_classes is not None\n ), \"must specify y if and only if the model is class-conditional\"\n hs = []\n t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)\n emb = self.time_embed(t_emb)\n\n if self.num_classes is not None:\n assert y.shape[0] == x.shape[0]\n emb = emb + self.label_emb(y)\n\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, context)\n hs.append(h)\n h = self.middle_block(h, emb, context)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, context)\n h = h.type(x.dtype)\n if self.predict_codebook_ids:\n return self.id_predictor(h)\n else:\n return self.out(h)" }, { "identifier": "TimestepEmbedSequential", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class TimestepEmbedSequential(nn.Sequential, TimestepBlock):\n \"\"\"\n A sequential module that passes timestep embeddings to the children that\n support it as an extra input.\n \"\"\"\n\n def forward(self, x, emb, context=None):\n for layer in self:\n if isinstance(layer, TimestepBlock):\n x = layer(x, emb)\n elif isinstance(layer, SpatialTransformer):\n x = layer(x, context)\n else:\n x = layer(x)\n return x" }, { "identifier": "ResBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class ResBlock(TimestepBlock):\n \"\"\"\n A residual block that can optionally change the number of channels.\n :param channels: the number of input channels.\n :param emb_channels: the number of timestep embedding channels.\n :param dropout: the rate of dropout.\n :param out_channels: if specified, the number of out channels.\n :param use_conv: if True and out_channels is specified, use a spatial\n convolution instead of a smaller 1x1 convolution to change the\n channels in the skip connection.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param use_checkpoint: if True, use gradient checkpointing on this module.\n :param up: if True, use this block for upsampling.\n :param down: if True, use this block for downsampling.\n \"\"\"\n\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n super().__init__()\n self.channels = channels\n self.emb_channels = emb_channels\n self.dropout = dropout\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_checkpoint = use_checkpoint\n self.use_scale_shift_norm = use_scale_shift_norm\n\n self.in_layers = nn.Sequential(\n normalization(channels),\n nn.SiLU(),\n conv_nd(dims, channels, self.out_channels, 3, padding=1),\n )\n\n self.updown = up or down\n\n if up:\n self.h_upd = Upsample(channels, False, dims)\n self.x_upd = Upsample(channels, False, dims)\n elif down:\n self.h_upd = Downsample(channels, False, dims)\n self.x_upd = Downsample(channels, False, dims)\n else:\n self.h_upd = self.x_upd = nn.Identity()\n\n self.emb_layers = nn.Sequential(\n nn.SiLU(),\n linear(\n emb_channels,\n 2 * self.out_channels if use_scale_shift_norm else self.out_channels,\n ),\n )\n self.out_layers = nn.Sequential(\n normalization(self.out_channels),\n nn.SiLU(),\n nn.Dropout(p=dropout),\n zero_module(\n conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)\n ),\n )\n\n if self.out_channels == channels:\n self.skip_connection = nn.Identity()\n elif use_conv:\n self.skip_connection = conv_nd(\n dims, channels, 
self.out_channels, 3, padding=1\n )\n else:\n self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)\n\n def forward(self, x, emb):\n \"\"\"\n Apply the block to a Tensor, conditioned on a timestep embedding.\n :param x: an [N x C x ...] Tensor of features.\n :param emb: an [N x emb_channels] Tensor of timestep embeddings.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n return checkpoint(\n self._forward, (x, emb), self.parameters(), self.use_checkpoint\n )\n\n\n def _forward(self, x, emb):\n if self.updown:\n in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]\n h = in_rest(x)\n h = self.h_upd(h)\n x = self.x_upd(x)\n h = in_conv(h)\n else:\n h = self.in_layers(x)\n emb_out = self.emb_layers(emb).type(h.dtype)\n while len(emb_out.shape) < len(h.shape):\n emb_out = emb_out[..., None]\n if self.use_scale_shift_norm:\n out_norm, out_rest = self.out_layers[0], self.out_layers[1:]\n scale, shift = th.chunk(emb_out, 2, dim=1)\n h = out_norm(h) * (1 + scale) + shift\n h = out_rest(h)\n else:\n h = h + emb_out\n h = self.out_layers(h)\n return self.skip_connection(x) + h" }, { "identifier": "Downsample", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class Downsample(nn.Module):\n \"\"\"\n A downsampling layer with an optional convolution.\n :param channels: channels in the inputs and outputs.\n :param use_conv: a bool determining if a convolution is applied.\n :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then\n downsampling occurs in the inner-two dimensions.\n \"\"\"\n\n def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.dims = dims\n stride = 2 if dims != 3 else (1, 2, 2)\n if use_conv:\n self.op = conv_nd(\n dims, self.channels, self.out_channels, 3, stride=stride, padding=padding\n )\n else:\n assert self.channels == self.out_channels\n self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)\n\n def forward(self, x):\n assert x.shape[1] == self.channels\n return self.op(x)" }, { "identifier": "AttentionBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class AttentionBlock(nn.Module):\n \"\"\"\n An attention block that allows spatial positions to attend to each other.\n Originally ported from here, but adapted to the N-d case.\n https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.\n \"\"\"\n\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n ):\n super().__init__()\n self.channels = channels\n if num_head_channels == -1:\n self.num_heads = num_heads\n else:\n assert (\n channels % num_head_channels == 0\n ), f\"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}\"\n self.num_heads = channels // num_head_channels\n self.use_checkpoint = use_checkpoint\n self.norm = normalization(channels)\n self.qkv = conv_nd(1, channels, channels * 3, 1)\n if use_new_attention_order:\n # split qkv before split heads\n self.attention = QKVAttention(self.num_heads)\n else:\n # split heads before split qkv\n self.attention = QKVAttentionLegacy(self.num_heads)\n\n self.proj_out = zero_module(conv_nd(1, channels, channels, 1))\n\n def forward(self, x):\n return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half 
call!!!\n #return pt_checkpoint(self._forward, x) # pytorch\n\n def _forward(self, x):\n b, c, *spatial = x.shape\n x = x.reshape(b, c, -1)\n qkv = self.qkv(self.norm(x))\n h = self.attention(qkv)\n h = self.proj_out(h)\n return (x + h).reshape(b, c, *spatial)" }, { "identifier": "Upsample", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class Upsample(nn.Module):\n \"\"\"\n An upsampling layer with an optional convolution.\n :param channels: channels in the inputs and outputs.\n :param use_conv: a bool determining if a convolution is applied.\n :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then\n upsampling occurs in the inner-two dimensions.\n \"\"\"\n\n def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, scale_guide=False):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.dims = dims\n self.scale_guide = scale_guide\n if use_conv:\n self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)\n\n def forward(self, x):\n assert x.shape[1] == self.channels\n if self.dims == 3:\n x = F.interpolate(\n x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode=\"nearest\"\n )\n else:\n x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n # if self.scale_guide:\n # x = F.interpolate(x, scale_factor=1.75, mode=\"nearest\")\n # else:\n # x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n if self.use_conv:\n x = self.conv(x)\n return x" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. / z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = 
encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting 
= weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n # z = x\n if self.model.conditioning_key is not None and not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', \"txt\"]:\n xc = batch[cond_key]\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. 
/ self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n if self.cond_stage_trainable:\n c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n 
return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n 
total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = 
{\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. 
Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True,timesteps=1000):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n x_T=None,\n timesteps=1000,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose,timesteps=timesteps)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n timesteps=timesteps,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0])\n # subset_end = int(timesteps+1 * self.ddim_timesteps.shape[0] / self.ddpm_num_timesteps)\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling 
with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % 500 == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), 
sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n 
timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
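For reference, a minimal self-contained sketch of the eps-parameterized DDIM update that p_sample_ddim in the snippet above computes (pred_x0, the direction term dir_xt, and the optional sigma noise). The ddim_step function name, the dummy latent x, the random e_t standing in for a model's eps prediction, and the alpha/sigma values are all illustrative placeholders, not part of the original code.

import torch

def ddim_step(x, e_t, a_t, a_prev, sigma_t):
    # current prediction for x_0 from the eps prediction
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()
    # direction pointing to x_t
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
    noise = sigma_t * torch.randn_like(x)
    return a_prev.sqrt() * pred_x0 + dir_xt + noise, pred_x0

# dummy latent and eps prediction; in the sampler these come from the model and its schedule
x = torch.randn(1, 4, 8, 8)
e_t = torch.randn_like(x)
x_prev, pred_x0 = ddim_step(x, e_t,
                            a_t=torch.tensor(0.7),
                            a_prev=torch.tensor(0.8),
                            sigma_t=torch.tensor(0.0))  # sigma_t = 0 is the deterministic DDIM case

With sigma_t = 0 (eta = 0) the step is deterministic; a larger ddim_eta in make_schedule moves the update toward DDPM-style stochastic sampling.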
import einops
import torch
import torch as th
import torch.nn as nn
import torchvision
from ldm.modules.diffusionmodules.util import (
    conv_nd,
    linear,
    zero_module,
    timestep_embedding,
)
from einops import rearrange, repeat
from torchvision.utils import make_grid
from ldm.modules.attention import SpatialTransformer
from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock, Upsample
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import log_txt_as_img, exists, instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from omegaconf.listconfig import ListConfig
21,254
def custom_sigmoid(x):
    return 1 / (1 + torch.exp(-(x - 600) / 10))


class ControlledUnetModel(UNetModel):
    def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs):
        hs = []
        with torch.no_grad():
def custom_sigmoid(x):
    return 1 / (1 + torch.exp(-(x - 600) / 10))


class ControlledUnetModel(UNetModel):
    def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs):
        hs = []
        with torch.no_grad():
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
3
2023-10-30 14:21:09+00:00
24k
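As a quick check on the cropped_code above, custom_sigmoid is an ordinary logistic gate shifted to timestep 600 with a width of 10; the sample timesteps below and the printed values are only approximate illustrations.

import torch

def custom_sigmoid(x):
    return 1 / (1 + torch.exp(-(x - 600) / 10))

# roughly 0 well below t = 600, 0.5 at t = 600, and roughly 1 well above it
print(custom_sigmoid(torch.tensor([0.0, 600.0, 1000.0])))  # ~[0.0, 0.5, 1.0]

The crop ends before custom_sigmoid is used, so how it gates the control signal is not visible in this record.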
nv-tlabs/pacer
pacer/utils/motion_lib_smpl.py
[ { "identifier": "torch_utils", "path": "pacer/utils/torch_utils.py", "snippet": "def my_quat_rotate(q, v):\ndef quat_to_angle_axis(q):\ndef angle_axis_to_exp_map(angle, axis):\ndef quat_to_exp_map(q):\ndef quat_to_tan_norm(q):\ndef euler_xyz_to_exp_map(roll, pitch, yaw):\ndef exp_map_to_angle_axis(exp_map):\ndef exp_map_to_quat(exp_map):\ndef slerp(q0, q1, t):\ndef calc_heading(q):\ndef calc_heading_quat(q):\ndef calc_heading_quat_inv(q):\ndef activation_facotry(act_name):" }, { "identifier": "SkeletonMotion", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonMotion(SkeletonState):\n def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):\n self._fps = fps\n super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)\n\n def clone(self):\n return SkeletonMotion(\n self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps\n )\n\n @property\n def invariant_property(self):\n return {\n \"skeleton_tree\": self.skeleton_tree,\n \"is_local\": self.is_local,\n \"fps\": self.fps,\n }\n\n @property\n def global_velocity(self):\n \"\"\" global velocity \"\"\"\n curr_index = self.num_joints * 4 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def global_angular_velocity(self):\n \"\"\" global angular velocity \"\"\"\n curr_index = self.num_joints * 7 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def fps(self):\n \"\"\" number of frames per second \"\"\"\n return self._fps\n\n @property\n def time_delta(self):\n \"\"\" time between two adjacent frames \"\"\"\n return 1.0 / self.fps\n\n @property\n def global_root_velocity(self):\n \"\"\" global root velocity \"\"\"\n return self.global_velocity[..., 0, :]\n\n @property\n def global_root_angular_velocity(self):\n \"\"\" global root angular velocity \"\"\"\n return self.global_angular_velocity[..., 0, :]\n\n @classmethod\n def from_state_vector_and_velocity(\n cls,\n skeleton_tree,\n state_vector,\n global_velocity,\n global_angular_velocity,\n is_local,\n fps,\n ):\n \"\"\"\n Construct a skeleton motion from a skeleton state vector, global velocity and angular\n velocity at each joint.\n\n :param skeleton_tree: the skeleton tree that the motion is based on \n :type skeleton_tree: SkeletonTree\n :param state_vector: the state vector from the skeleton state by `.tensor`\n :type state_vector: Tensor\n :param global_velocity: the global velocity at each joint\n :type global_velocity: Tensor\n :param global_angular_velocity: the global angular velocity at each joint\n :type global_angular_velocity: Tensor\n :param is_local: if the rotation ins the state vector is given in local frame\n :type is_local: boolean\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n state_shape = state_vector.shape[:-1]\n v = global_velocity.reshape(*(state_shape + (-1,)))\n av = global_angular_velocity.reshape(*(state_shape + (-1,)))\n new_state_vector = torch.cat([state_vector, v, av], axis=-1)\n return cls(\n new_state_vector, skeleton_tree=skeleton_tree, is_local=is_local, fps=fps,\n )\n\n @classmethod\n def from_skeleton_state(\n cls: Type[\"SkeletonMotion\"], skeleton_state: SkeletonState, fps: int\n ):\n \"\"\"\n Construct a skeleton motion from a skeleton state. 
The velocities are estimated using second\n order guassian filter along the last axis. The skeleton state must have at least .dim >= 1\n\n :param skeleton_state: the skeleton state that the motion is based on \n :type skeleton_state: SkeletonState\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n\n assert (\n type(skeleton_state) == SkeletonState\n ), \"expected type of {}, got {}\".format(SkeletonState, type(skeleton_state))\n\n global_velocity = SkeletonMotion._compute_velocity(\n p=skeleton_state.global_translation, time_delta=1 / fps\n )\n global_angular_velocity = SkeletonMotion._compute_angular_velocity(\n r=skeleton_state.global_rotation, time_delta=1 / fps\n )\n\n return cls.from_state_vector_and_velocity(\n skeleton_tree=skeleton_state.skeleton_tree,\n state_vector=skeleton_state.tensor,\n global_velocity=global_velocity,\n global_angular_velocity=global_angular_velocity,\n is_local=skeleton_state.is_local,\n fps=fps,\n )\n\n @staticmethod\n def _to_state_vector(rot, rt, vel, avel):\n state_shape = rot.shape[:-2]\n skeleton_state_v = SkeletonState._to_state_vector(rot, rt)\n v = vel.reshape(*(state_shape + (-1,)))\n av = avel.reshape(*(state_shape + (-1,)))\n skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)\n return skeleton_motion_v\n\n @classmethod\n def from_dict(\n cls: Type[\"SkeletonMotion\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonMotion\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n vel = TensorUtils.from_dict(dict_repr[\"global_velocity\"], *args, **kwargs)\n avel = TensorUtils.from_dict(\n dict_repr[\"global_angular_velocity\"], *args, **kwargs\n )\n return cls(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=SkeletonTree.from_dict(\n dict_repr[\"skeleton_tree\"], *args, **kwargs\n ),\n is_local=dict_repr[\"is_local\"],\n fps=dict_repr[\"fps\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"global_velocity\", tensor_to_dict(self.global_velocity)),\n (\"global_angular_velocity\", tensor_to_dict(self.global_angular_velocity)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n (\"fps\", self.fps),\n ]\n )\n\n @classmethod\n def from_fbx(\n cls: Type[\"SkeletonMotion\"],\n fbx_file_path,\n fbx_configs,\n skeleton_tree=None,\n is_local=True,\n fps=120,\n root_joint=\"\",\n root_trans_index=0,\n *args,\n **kwargs,\n ) -> \"SkeletonMotion\":\n \"\"\"\n Construct a skeleton motion from a fbx file (TODO - generalize this). 
If the skeleton tree\n is not given, it will use the first frame of the mocap to construct the skeleton tree.\n\n :param fbx_file_path: the path of the fbx file\n :type fbx_file_path: string\n :param fbx_configs: the configuration in terms of {\"tmp_path\": ..., \"fbx_py27_path\": ...}\n :type fbx_configs: dict\n :param skeleton_tree: the optional skeleton tree that the rotation will be applied to\n :type skeleton_tree: SkeletonTree, optional\n :param is_local: the state vector uses local or global rotation as the representation\n :type is_local: bool, optional, default=True\n :rtype: SkeletonMotion\n \"\"\"\n joint_names, joint_parents, transforms, fps = fbx_to_array(\n fbx_file_path, fbx_configs, root_joint, fps\n )\n # swap the last two axis to match the convention\n local_transform = euclidean_to_transform(\n transformation_matrix=torch.from_numpy(\n np.swapaxes(np.array(transforms), -1, -2),\n ).float()\n )\n local_rotation = transform_rotation(local_transform)\n root_translation = transform_translation(local_transform)[..., root_trans_index, :]\n joint_parents = torch.from_numpy(np.array(joint_parents)).int()\n\n if skeleton_tree is None:\n local_translation = transform_translation(local_transform).reshape(\n -1, len(joint_parents), 3\n )[0]\n skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)\n skeleton_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree, r=local_rotation, t=root_translation, is_local=True\n )\n if not is_local:\n skeleton_state = skeleton_state.global_repr()\n return cls.from_skeleton_state(\n skeleton_state=skeleton_state, fps=fps\n )\n\n @staticmethod\n def _compute_velocity(p, time_delta, guassian_filter=True):\n velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n np.gradient(p.numpy(), axis=-3), 2, axis=-3, mode=\"nearest\"\n )\n / time_delta,\n )\n return velocity\n\n @staticmethod\n def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):\n # assume the second last dimension is the time axis\n diff_quat_data = quat_identity_like(r)\n diff_quat_data[..., :-1, :, :] = quat_mul_norm(\n r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :])\n )\n diff_angle, diff_axis = quat_angle_axis(diff_quat_data)\n angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta\n angular_velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n angular_velocity.numpy(), 2, axis=-3, mode=\"nearest\"\n ),\n )\n return angular_velocity\n\n def crop(self, start: int, end: int, fps: Optional[int] = None):\n \"\"\"\n Crop the motion along its last axis. This is equivalent to performing a slicing on the\n object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the\n new fps provided must be a factor of the original fps. 
\n\n :param start: the beginning frame index\n :type start: int\n :param end: the ending frame index\n :type end: int\n :param fps: number of frames per second in the output (if not given the original fps will be used)\n :type fps: int, optional\n :rtype: SkeletonMotion\n \"\"\"\n if fps is None:\n new_fps = int(self.fps)\n old_fps = int(self.fps)\n else:\n new_fps = int(fps)\n old_fps = int(self.fps)\n assert old_fps % fps == 0, (\n \"the resampling doesn't support fps with non-integer division \"\n \"from the original fps: {} => {}\".format(old_fps, fps)\n )\n skip_every = old_fps // new_fps\n s = slice(start, end, skip_every)\n z = self[..., s]\n\n rot = z.local_rotation if z.is_local else z.global_rotation\n rt = z.root_translation\n vel = z.global_velocity\n avel = z.global_angular_velocity\n return SkeletonMotion(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=z.skeleton_tree,\n is_local=z.is_local,\n fps=new_fps,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: \"SkeletonTree\",\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return SkeletonMotion.from_skeleton_state(\n super().retarget_to(\n joint_mapping,\n source_tpose_local_rotation,\n source_tpose_root_translation,\n target_skeleton_tree,\n target_tpose_local_rotation,\n target_tpose_root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n ),\n self.fps,\n )\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n )" }, { "identifier": "SkeletonState", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonState(Serializable):\n \"\"\"\n A skeleton state contains all the information needed to describe a static state of a skeleton.\n It requires a skeleton tree, local/global rotation at each joint and the root translation.\n\n Example:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> zero_pose = SkeletonState.zero_pose(t)\n >>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`\n [plot of the ant at zero pose\n >>> local_rotation = zero_pose.local_rotation.clone()\n >>> local_rotation[2] = torch.tensor([0, 0, 1, 0])\n >>> new_pose = SkeletonState.from_rotation_and_root_translation(\n ... skeleton_tree=t,\n ... r=local_rotation,\n ... t=zero_pose.root_translation,\n ... is_local=True\n ... 
)\n >>> new_pose.local_rotation\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n >>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent\n [plot of the ant with the new pose\n >>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n\n Global/Local Representation (cont. from the previous example)\n >>> new_pose.is_local\n True\n >>> new_pose.tensor # this will return the local rotation followed by the root translation\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translatio\n torch.Size([55])\n >>> new_pose.global_repr().is_local\n False\n >>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation\n torch.Size([55])\n \"\"\"\n\n def __init__(self, tensor_backend, skeleton_tree, is_local):\n self._skeleton_tree = skeleton_tree\n self._is_local = is_local\n self.tensor = tensor_backend.clone()\n\n def __len__(self):\n return self.tensor.shape[0]\n\n @property\n def rotation(self):\n if not hasattr(self, \"_rotation\"):\n self._rotation = self.tensor[..., : self.num_joints * 4].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 4))\n )\n return self._rotation\n\n @property\n def _local_rotation(self):\n if self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def _global_rotation(self):\n if not self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def is_local(self):\n \"\"\" is the rotation represented in local frame? 
\n \n :rtype: bool\n \"\"\"\n return self._is_local\n\n @property\n def invariant_property(self):\n return {\"skeleton_tree\": self.skeleton_tree, \"is_local\": self.is_local}\n\n @property\n def num_joints(self):\n \"\"\" number of joints in the skeleton tree \n \n :rtype: int\n \"\"\"\n return self.skeleton_tree.num_joints\n\n @property\n def skeleton_tree(self):\n \"\"\" skeleton tree \n \n :rtype: SkeletonTree\n \"\"\"\n return self._skeleton_tree\n\n @property\n def root_translation(self):\n \"\"\" root translation \n \n :rtype: Tensor\n \"\"\"\n if not hasattr(self, \"_root_translation\"):\n self._root_translation = self.tensor[\n ..., self.num_joints * 4 : self.num_joints * 4 + 3\n ]\n return self._root_translation\n\n @property\n def global_transformation(self):\n \"\"\" global transformation of each joint (transform from joint frame to global frame) \"\"\"\n # Forward kinemaitcs.\n \n if not hasattr(self, \"_global_transformation\"):\n local_transformation = self.local_transformation.clone()\n global_transformation = []\n parent_indices = self.skeleton_tree.parent_indices.numpy()\n # global_transformation = local_transformation.identity_like()\n \n local_transformation[..., :4] = quat_mul(\n self.skeleton_tree._local_xml_rotation,\n local_transformation[..., :4])\n\n for node_index in range(len(self.skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index == -1:\n global_transformation.append(\n local_transformation[..., node_index, :]\n )\n else:\n # Here to factor in the local xml rotation\n\n global_transformation.append(\n transform_mul(\n global_transformation[parent_index],\n local_transformation[..., node_index, :],\n )\n )\n self._global_transformation = torch.stack(global_transformation, axis=-2)\n return self._global_transformation\n\n @property\n def global_rotation(self):\n \"\"\" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global\n F.O.R) \"\"\"\n if self._global_rotation is None:\n if not hasattr(self, \"_comp_global_rotation\"):\n self._comp_global_rotation = transform_rotation(\n self.global_transformation\n )\n return self._comp_global_rotation\n else:\n return self._global_rotation\n\n @property\n def global_translation(self):\n \"\"\" global translation of each joint \"\"\"\n if not hasattr(self, \"_global_translation\"):\n self._global_translation = transform_translation(self.global_transformation)\n return self._global_translation\n\n @property\n def global_translation_xy(self):\n \"\"\" global translation in xy \"\"\"\n trans_xy_data = self.global_translation.zeros_like()\n trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]\n return trans_xy_data\n\n @property\n def global_translation_xz(self):\n \"\"\" global translation in xz \"\"\"\n trans_xz_data = self.global_translation.zeros_like()\n trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]\n trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]\n return trans_xz_data\n\n @property\n def local_rotation(self):\n \"\"\" the rotation from child frame to parent frame given in the order of child nodes appeared\n in `.skeleton_tree.node_names` \"\"\"\n if self._local_rotation is None:\n if not hasattr(self, \"_comp_local_rotation\"):\n local_rotation = quat_identity_like(self.global_rotation)\n for node_index in range(len(self.skeleton_tree)):\n parent_index = self.skeleton_tree.parent_indices[node_index]\n if parent_index == -1:\n local_rotation[..., node_index, :] = self.global_rotation[\n ..., node_index, :\n ]\n else:\n 
local_rotation[..., node_index, :] = quat_mul_norm(\n quat_inverse(self.global_rotation[..., parent_index, :]),\n self.global_rotation[..., node_index, :],\n )\n self._comp_local_rotation = local_rotation\n return self._comp_local_rotation\n else:\n return self._local_rotation\n\n @property\n def local_transformation(self):\n \"\"\" local translation + local rotation. It describes the transformation from child frame to \n parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` \"\"\"\n if not hasattr(self, \"_local_transformation\"):\n self._local_transformation = transform_from_rotation_translation(\n r=self.local_rotation, t=self.local_translation\n )\n return self._local_transformation\n\n @property\n def local_translation(self):\n \"\"\" local translation of the skeleton state. It is identical to the local translation in\n `.skeleton_tree.local_translation` except the root translation. The root translation is\n identical to `.root_translation` \"\"\"\n if not hasattr(self, \"_local_translation\"):\n broadcast_shape = (\n tuple(self.tensor.shape[:-1])\n + (len(self.skeleton_tree),)\n + tuple(self.skeleton_tree.local_translation.shape[-1:])\n )\n local_translation = self.skeleton_tree.local_translation.broadcast_to(\n *broadcast_shape\n ).clone()\n local_translation[..., 0, :] = self.root_translation\n self._local_translation = local_translation\n return self._local_translation\n\n # Root Properties\n @property\n def root_translation_xy(self):\n \"\"\" root translation on xy \"\"\"\n if not hasattr(self, \"_root_translation_xy\"):\n self._root_translation_xy = self.global_translation_xy[..., 0, :]\n return self._root_translation_xy\n\n @property\n def global_root_rotation(self):\n \"\"\" root rotation \"\"\"\n if not hasattr(self, \"_global_root_rotation\"):\n self._global_root_rotation = self.global_rotation[..., 0, :]\n return self._global_root_rotation\n\n @property\n def global_root_yaw_rotation(self):\n \"\"\" root yaw rotation \"\"\"\n if not hasattr(self, \"_global_root_yaw_rotation\"):\n self._global_root_yaw_rotation = self.global_root_rotation.yaw_rotation()\n return self._global_root_yaw_rotation\n\n # Properties relative to root\n @property\n def local_translation_to_root(self):\n \"\"\" The 3D translation from joint frame to the root frame. \"\"\"\n if not hasattr(self, \"_local_translation_to_root\"):\n self._local_translation_to_root = (\n self.global_translation - self.root_translation.unsqueeze(-1)\n )\n return self._local_translation_to_root\n\n @property\n def local_rotation_to_root(self):\n \"\"\" The 3D rotation from joint frame to the root frame. 
It is equivalent to \n The root_R_world * world_R_node \"\"\"\n return (\n quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation\n )\n\n def compute_forward_vector(\n self,\n left_shoulder_index,\n right_shoulder_index,\n left_hip_index,\n right_hip_index,\n gaussian_filter_width=20,\n ):\n \"\"\" Computes forward vector based on cross product of the up vector with \n average of the right->left shoulder and hip vectors \"\"\"\n global_positions = self.global_translation\n # Perpendicular to the forward direction.\n # Uses the shoulders and hips to find this.\n side_direction = (\n global_positions[:, left_shoulder_index].numpy()\n - global_positions[:, right_shoulder_index].numpy()\n + global_positions[:, left_hip_index].numpy()\n - global_positions[:, right_hip_index].numpy()\n )\n side_direction = (\n side_direction\n / np.sqrt((side_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n # Forward direction obtained by crossing with the up direction.\n forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))\n\n # Smooth the forward direction with a Gaussian.\n # Axis 0 is the time/frame axis.\n forward_direction = filters.gaussian_filter1d(\n forward_direction, gaussian_filter_width, axis=0, mode=\"nearest\"\n )\n forward_direction = (\n forward_direction\n / np.sqrt((forward_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n return torch.from_numpy(forward_direction)\n\n @staticmethod\n def _to_state_vector(rot, rt):\n # Tensorbackend: local rotation and translation, rotation is is in quat 33 * 4 + 3\n state_shape = rot.shape[:-2]\n vr = rot.reshape(*(state_shape + (-1,)))\n vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(\n *(state_shape + (-1,))\n )\n v = torch.cat([vr, vt], axis=-1)\n return v\n\n @classmethod\n def from_dict(\n cls: Type[\"SkeletonState\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonState\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n return cls(\n SkeletonState._to_state_vector(rot, rt),\n SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n dict_repr[\"is_local\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n ]\n )\n\n @classmethod\n def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):\n \"\"\"\n Construct a skeleton state from rotation and root translation\n\n :param skeleton_tree: the skeleton tree\n :type skeleton_tree: SkeletonTree\n :param r: rotation (either global or local)\n :type r: Tensor\n :param t: root translation\n :type t: Tensor\n :param is_local: to indicate that whether the rotation is local or global\n :type is_local: bool, optional, default=True\n \"\"\"\n assert (\n r.dim() > 0\n ), \"the rotation needs to have at least 1 dimension (dim = {})\".format(r.dim)\n return cls(\n SkeletonState._to_state_vector(r, t),\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n )\n\n @classmethod\n def zero_pose(cls, skeleton_tree):\n \"\"\"\n Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local\n rotation is 0 and root translation is also 0.\n\n :param skeleton_tree: the skeleton tree as the rigid body\n :type skeleton_tree: SkeletonTree\n \"\"\"\n return 
cls.from_rotation_and_root_translation(\n skeleton_tree=skeleton_tree,\n r=quat_identity([skeleton_tree.num_joints]),\n t=torch.zeros(3, dtype=skeleton_tree.local_translation.dtype),\n is_local=True,\n )\n\n def local_repr(self):\n \"\"\" \n Convert the skeleton state into local representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=True`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def global_repr(self):\n \"\"\" \n Convert the skeleton state into global representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=False`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if not self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.global_rotation,\n t=self.root_translation,\n is_local=False,\n )\n\n def _get_pairwise_average_translation(self):\n global_transform_inv = transform_inverse(self.global_transformation)\n p1 = global_transform_inv.unsqueeze(-2)\n p2 = self.global_transformation.unsqueeze(-3)\n\n pairwise_translation = (\n transform_translation(transform_mul(p1, p2))\n .reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3)\n .mean(axis=0)\n )\n return pairwise_translation\n\n def _transfer_to(self, new_skeleton_tree: SkeletonTree):\n old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))\n return SkeletonState.from_rotation_and_root_translation(\n new_skeleton_tree,\n r=self.global_rotation[..., old_indices, :],\n t=self.root_translation,\n is_local=False,\n )\n\n def drop_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Drop a list of nodes from the skeleton and re-compute the local rotation to match the \n original joint position as much as possible. \n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n if estimate_local_translation_from_states:\n pairwise_translation = self._get_pairwise_average_translation()\n else:\n pairwise_translation = None\n new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(\n node_names, pairwise_translation\n )\n return self._transfer_to(new_skeleton_tree)\n\n def keep_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local \n rotation to match the original joint position as much as possible. 
\n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n return self.drop_nodes_by_names(\n list(filter(lambda x: (x not in node_names), self)),\n estimate_local_translation_from_states,\n )\n\n def _remapped_to(\n self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree\n ):\n joint_mapping_inv = {target: source for source, target in joint_mapping.items()}\n reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(\n list(joint_mapping_inv)\n )\n n_joints = (\n len(joint_mapping),\n len(self.skeleton_tree),\n len(reduced_target_skeleton_tree),\n )\n assert (\n len(set(n_joints)) == 1\n ), \"the joint mapping is not consistent with the skeleton trees\"\n source_indices = list(\n map(\n lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),\n reduced_target_skeleton_tree,\n )\n )\n target_local_rotation = self.local_rotation[..., source_indices, :]\n return SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=reduced_target_skeleton_tree,\n r=target_local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: SkeletonTree,\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. The function follows the procedures below.\n\n Steps:\n 1. Drop the joints from the source (self) that do not belong to the joint mapping\\\n with an implementation that is similar to \"keep_nodes_by_names()\" - take a\\\n look at the function doc for more details (same for source_tpose)\n \n 2. Rotate the source state and the source tpose by \"rotation_to_target_skeleton\"\\\n to align the source with the target orientation\n \n 3. Extract the root translation and normalize it to match the scale of the target\\\n skeleton\n \n 4. Extract the global rotation from source state relative to source tpose and\\\n re-apply the relative rotation to the target tpose to construct the global\\\n rotation after retargetting\n \n 5. Combine the computed global rotation and the root translation from 3 and 4 to\\\n complete the retargeting.\n \n 6. 
Make feet on the ground (global translation z)\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n\n # STEP 0: Preprocess\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=self.skeleton_tree,\n r=source_tpose_local_rotation,\n t=source_tpose_root_translation,\n is_local=True,\n )\n target_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=target_tpose_local_rotation,\n t=target_tpose_root_translation,\n is_local=True,\n )\n\n # STEP 1: Drop the irrelevant joints\n pairwise_translation = self._get_pairwise_average_translation()\n node_names = list(joint_mapping)\n new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(\n node_names, pairwise_translation\n )\n\n # TODO: combine the following steps before STEP 3\n source_tpose = source_tpose._transfer_to(new_skeleton_tree)\n source_state = self._transfer_to(new_skeleton_tree)\n\n source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)\n source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)\n\n # STEP 2: Rotate the source to align with the target\n new_local_rotation = source_tpose.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :]\n )\n\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_tpose.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),\n is_local=True,\n )\n\n new_local_rotation = source_state.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_state.local_rotation[..., 0, :]\n )\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_state.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),\n is_local=True,\n )\n\n # STEP 3: Normalize to match the target scale\n root_translation_diff = (\n source_state.root_translation - source_tpose.root_translation\n ) * scale_to_target_skeleton\n\n # STEP 4: the global rotation from source state 
relative to source tpose and\n # re-apply to the target\n current_skeleton_tree = source_state.skeleton_tree\n target_tpose_global_rotation = source_state.global_rotation[0, :].clone()\n for current_index, name in enumerate(current_skeleton_tree):\n if name in target_tpose.skeleton_tree:\n target_tpose_global_rotation[\n current_index, :\n ] = target_tpose.global_rotation[\n target_tpose.skeleton_tree.index(name), :\n ]\n\n global_rotation_diff = quat_mul_norm(\n source_state.global_rotation, quat_inverse(source_tpose.global_rotation)\n )\n new_global_rotation = quat_mul_norm(\n global_rotation_diff, target_tpose_global_rotation\n )\n\n # STEP 5: Putting 3 and 4 together\n current_skeleton_tree = source_state.skeleton_tree\n shape = source_state.global_rotation.shape[:-1]\n shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]\n new_global_rotation_output = quat_identity(shape)\n for current_index, name in enumerate(target_skeleton_tree):\n while name not in current_skeleton_tree:\n name = target_skeleton_tree.parent_of(name)\n parent_index = current_skeleton_tree.index(name)\n new_global_rotation_output[:, current_index, :] = new_global_rotation[\n :, parent_index, :\n ]\n\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=new_global_rotation_output,\n t=target_tpose.root_translation + root_translation_diff,\n is_local=False,\n ).local_repr()\n\n return source_state\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. See the method `retarget_to()` for more information\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n assert (\n len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0\n ), \"the retargeting script currently doesn't support vectorized operations\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n )" }, { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, create_transl=False, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. 
MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n self.joint_range[\"L_Shoulder\"] *= 4\n self.joint_range[\"R_Shoulder\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = th_betas[:, :10]\n\n batch_size = pose.shape[0]\n\n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n parents_dict = {\n joint_names[i]: joint_names[parents[i]]\n for i in range(len(joint_names))\n }\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names,\n joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self,\n zero_pose=None,\n betas=torch.zeros(1, 10),\n flatfoot=False):\n with torch.no_grad():\n 
joint_names = self.joint_names\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(\n betas.shape[0], 1),\n th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {\n joint_names[c]:\n (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:,\n c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # joints = 
smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [\n SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES\n ]\n self.parents_to_use = np.concatenate(\n [np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n 
)\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n\n def get_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n if joint_names[c] in self.joint_names\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n if joint_names[i] in self.joint_names\n }\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" } ]
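The SMPL/SMPLH/SMPLX parser snippets above all follow the same pattern: wrap an smplx body model, then evaluate the zero pose to recover the rest-pose skeleton, per-joint offsets, parents, and skinning weights. A minimal usage sketch for the SMPL variant, assuming the smplx package is installed and the SMPL model files sit under a local data/smpl directory (both assumptions, not part of this record):

import torch
from uhc.smpllib.smpl_parser import SMPL_Parser

data_dir = "data/smpl"  # assumed location of the SMPL .pkl model files
smpl_parser = SMPL_Parser(model_path=data_dir, gender="neutral")

# Rest pose (zero pose, zero betas) gives the kinematic tree and skin weights.
(verts, joint_pos, skin_weights, joint_names, joint_offsets,
 joint_parents, joint_axes, joint_dofs, joint_range,
 contype, conaffinity) = smpl_parser.get_mesh_offsets(betas=torch.zeros(1, 10))

print(len(joint_names))            # 24 SMPL joints
print(joint_parents["L_Knee"])     # parent joint of the left knee
print(joint_offsets["L_Knee"])     # rest-pose offset of L_Knee from that parent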
from ast import If from tqdm import tqdm from poselib.poselib.core.rotation3d import * from isaacgym.torch_utils import * from pacer.utils import torch_utils from poselib.poselib.skeleton.skeleton3d import SkeletonMotion, SkeletonState from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) import numpy as np import os import yaml import joblib import torch import torch.multiprocessing as mp import copy import gc
18,179
betas = curr_gender_betas[1:] height_tolorance = 0.0 vertices_curr, joints_curr = smpl_parsers[gender.item()].get_joints_verts(pose_aa, betas[None, ], trans) offset = joints_curr[:, 0] - trans diff_fix = ((vertices_curr - offset[:, None])[..., -1].min(dim=-1).values - height_tolorance).min() vertices_curr[..., 2].max() - vertices_curr[..., 2].min() trans[..., -1] -= diff_fix return trans def load_motion_with_skeleton(ids, motion_data_list, skeleton_trees, gender_betas, fix_height, smpl_parsers, masterfoot_config, queue, pid): # ZL: loading motion with the specified skeleton. Perfoming forward kinematics to get the joint positions res = {} for f in range(len(motion_data_list)): assert (len(ids) == len(motion_data_list)) curr_id = ids[f] # id for this datasample curr_file = motion_data_list[f] curr_gender_beta = gender_betas[f] trans = curr_file['root_trans_offset'].clone() pose_aa = torch.from_numpy(curr_file['pose_aa']) if fix_height: trans = fix_trans_height(pose_aa, trans, curr_gender_beta, smpl_parsers) pose_quat_global = curr_file['pose_quat_global'] B, J, N = pose_quat_global.shape if not masterfoot_config is None: num_bodies = len(masterfoot_config['body_names']) pose_quat_holder = np.zeros([B, num_bodies, N]) pose_quat_holder[..., -1] = 1 pose_quat_holder[...,masterfoot_config['body_to_orig_without_toe'], :] \ = pose_quat_global[..., masterfoot_config['orig_to_orig_without_toe'], :] pose_quat_holder[..., [ masterfoot_config['body_names'].index(name) for name in ["L_Toe", "L_Toe_1", "L_Toe_1_1", "L_Toe_2"] ], :] = pose_quat_holder[..., [masterfoot_config['body_names'].index(name) for name in ["L_Ankle"]], :] pose_quat_holder[..., [ masterfoot_config['body_names'].index(name) for name in ["R_Toe", "R_Toe_1", "R_Toe_1_1", "R_Toe_2"] ], :] = pose_quat_holder[..., [masterfoot_config['body_names'].index(name) for name in ["R_Ankle"]], :] pose_quat_global = pose_quat_holder sk_state = SkeletonState.from_rotation_and_root_translation( skeleton_trees[f], torch.from_numpy(pose_quat_global), trans, is_local=False) curr_motion = SkeletonMotion.from_skeleton_state(sk_state, curr_file.get("fps", 30)) curr_dof_vels = compute_motion_dof_vels(curr_motion) curr_motion.dof_vels = curr_dof_vels curr_motion.gender_beta = curr_gender_beta res[curr_id] = (curr_file, curr_motion) if not queue is None: queue.put(res) else: return res class DeviceCache: def __init__(self, obj, device): self.obj = obj self.device = device keys = dir(obj) num_added = 0 for k in keys: try: out = getattr(obj, k) except: # print("Error for key=", k) continue if isinstance(out, torch.Tensor): if out.is_floating_point(): out = out.to(self.device, dtype=torch.float32) else: out.to(self.device) setattr(self, k, out) num_added += 1 elif isinstance(out, np.ndarray): out = torch.tensor(out) if out.is_floating_point(): out = out.to(self.device, dtype=torch.float32) else: out.to(self.device) setattr(self, k, out) num_added += 1 # print("Total added", num_added) def __getattr__(self, string): out = getattr(self.obj, string) return out class MotionLib(): def __init__(self, motion_file, key_body_ids, device, fix_height = True, masterfoot_conifg = None, min_length = -1): self._key_body_ids = torch.tensor(key_body_ids, device=device) self._device = device self._motion_data = joblib.load(motion_file) if min_length != -1: data_list = {k: v for k, v in list(self._motion_data.items()) if len(v['pose_quat_global']) >= min_length} self._motion_data_list = np.array(list(data_list.values())) self._motion_data_keys = np.array(list(data_list.keys())) 
else: self._motion_data_list = np.array(list(self._motion_data.values())) self._motion_data_keys = np.array(list(self._motion_data.keys())) self._num_unique_motions = len(self._motion_data_list) self._masterfoot_conifg = masterfoot_conifg data_dir = "data/smpl"
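fix_trans_height in the cropped snippet above grounds a motion clip: it re-expresses the body vertices relative to the root translation, takes the lowest vertex height over the whole clip, and shifts the root translation so that lowest point sits on the ground (plus an optional tolerance). A self-contained sketch of just that arithmetic, with the SMPL forward pass replaced by precomputed vertex heights (the function name and inputs here are illustrative, not the repo's API):

import torch

def ground_clip(trans: torch.Tensor, vert_heights: torch.Tensor,
                tolerance: float = 0.0) -> torch.Tensor:
    # trans:        (T, 3) root translations.
    # vert_heights: (T, V) vertex z-coordinates expressed relative to the root
    #               translation (the role of `vertices_curr - offset` above).
    diff_fix = (vert_heights.min(dim=-1).values - tolerance).min()
    out = trans.clone()
    out[..., -1] -= diff_fix  # shift every frame by the same amount
    return out

# Toy usage: a 3-frame clip whose lowest vertex floats 5 cm above the ground.
trans = torch.zeros(3, 3)
vert_heights = torch.full((3, 8), 0.05)
print(ground_clip(trans, vert_heights)[:, -1])  # tensor([-0.0500, -0.0500, -0.0500])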
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. USE_CACHE = True print("MOVING MOTION DATA TO GPU, USING CACHE:", USE_CACHE) if not USE_CACHE: old_numpy = torch.Tensor.numpy class Patch: def numpy(self): if self.is_cuda: return self.to("cpu").numpy() else: return old_numpy(self) torch.Tensor.numpy = Patch.numpy def local_rotation_to_dof_vel(local_rot0, local_rot1, dt): # Assume each joint is 3dof diff_quat_data = quat_mul_norm(quat_inverse(local_rot0), local_rot1) diff_angle, diff_axis = quat_angle_axis(diff_quat_data) dof_vel = diff_axis * diff_angle.unsqueeze(-1) / dt return dof_vel[1:, :].flatten() def compute_motion_dof_vels(motion): num_frames = motion.tensor.shape[0] dt = 1.0 / motion.fps dof_vels = [] for f in range(num_frames - 1): local_rot0 = motion.local_rotation[f] local_rot1 = motion.local_rotation[f + 1] frame_dof_vel = local_rotation_to_dof_vel(local_rot0, local_rot1, dt) dof_vels.append(frame_dof_vel) dof_vels.append(dof_vels[-1]) dof_vels = torch.stack(dof_vels, dim=0).view(num_frames, -1, 3) return dof_vels def fix_trans_height(pose_aa, trans, curr_gender_betas, smpl_parsers): with torch.no_grad(): gender = curr_gender_betas[0] betas = curr_gender_betas[1:] height_tolorance = 0.0 vertices_curr, joints_curr = smpl_parsers[gender.item()].get_joints_verts(pose_aa, betas[None, ], trans) offset = joints_curr[:, 0] - trans diff_fix = ((vertices_curr - offset[:, None])[..., -1].min(dim=-1).values - height_tolorance).min() vertices_curr[..., 2].max() - vertices_curr[..., 2].min() trans[..., -1] -= diff_fix return trans def load_motion_with_skeleton(ids, motion_data_list, skeleton_trees, gender_betas, fix_height, smpl_parsers, masterfoot_config, queue, pid): # ZL: loading motion with the specified skeleton. 
Perfoming forward kinematics to get the joint positions res = {} for f in range(len(motion_data_list)): assert (len(ids) == len(motion_data_list)) curr_id = ids[f] # id for this datasample curr_file = motion_data_list[f] curr_gender_beta = gender_betas[f] trans = curr_file['root_trans_offset'].clone() pose_aa = torch.from_numpy(curr_file['pose_aa']) if fix_height: trans = fix_trans_height(pose_aa, trans, curr_gender_beta, smpl_parsers) pose_quat_global = curr_file['pose_quat_global'] B, J, N = pose_quat_global.shape if not masterfoot_config is None: num_bodies = len(masterfoot_config['body_names']) pose_quat_holder = np.zeros([B, num_bodies, N]) pose_quat_holder[..., -1] = 1 pose_quat_holder[...,masterfoot_config['body_to_orig_without_toe'], :] \ = pose_quat_global[..., masterfoot_config['orig_to_orig_without_toe'], :] pose_quat_holder[..., [ masterfoot_config['body_names'].index(name) for name in ["L_Toe", "L_Toe_1", "L_Toe_1_1", "L_Toe_2"] ], :] = pose_quat_holder[..., [masterfoot_config['body_names'].index(name) for name in ["L_Ankle"]], :] pose_quat_holder[..., [ masterfoot_config['body_names'].index(name) for name in ["R_Toe", "R_Toe_1", "R_Toe_1_1", "R_Toe_2"] ], :] = pose_quat_holder[..., [masterfoot_config['body_names'].index(name) for name in ["R_Ankle"]], :] pose_quat_global = pose_quat_holder sk_state = SkeletonState.from_rotation_and_root_translation( skeleton_trees[f], torch.from_numpy(pose_quat_global), trans, is_local=False) curr_motion = SkeletonMotion.from_skeleton_state(sk_state, curr_file.get("fps", 30)) curr_dof_vels = compute_motion_dof_vels(curr_motion) curr_motion.dof_vels = curr_dof_vels curr_motion.gender_beta = curr_gender_beta res[curr_id] = (curr_file, curr_motion) if not queue is None: queue.put(res) else: return res class DeviceCache: def __init__(self, obj, device): self.obj = obj self.device = device keys = dir(obj) num_added = 0 for k in keys: try: out = getattr(obj, k) except: # print("Error for key=", k) continue if isinstance(out, torch.Tensor): if out.is_floating_point(): out = out.to(self.device, dtype=torch.float32) else: out.to(self.device) setattr(self, k, out) num_added += 1 elif isinstance(out, np.ndarray): out = torch.tensor(out) if out.is_floating_point(): out = out.to(self.device, dtype=torch.float32) else: out.to(self.device) setattr(self, k, out) num_added += 1 # print("Total added", num_added) def __getattr__(self, string): out = getattr(self.obj, string) return out class MotionLib(): def __init__(self, motion_file, key_body_ids, device, fix_height = True, masterfoot_conifg = None, min_length = -1): self._key_body_ids = torch.tensor(key_body_ids, device=device) self._device = device self._motion_data = joblib.load(motion_file) if min_length != -1: data_list = {k: v for k, v in list(self._motion_data.items()) if len(v['pose_quat_global']) >= min_length} self._motion_data_list = np.array(list(data_list.values())) self._motion_data_keys = np.array(list(data_list.keys())) else: self._motion_data_list = np.array(list(self._motion_data.values())) self._motion_data_keys = np.array(list(self._motion_data.keys())) self._num_unique_motions = len(self._motion_data_list) self._masterfoot_conifg = masterfoot_conifg data_dir = "data/smpl"
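local_rotation_to_dof_vel and compute_motion_dof_vels above differentiate consecutive local joint rotations into per-joint angular velocities: relative rotation = inverse(q_f) composed with q_{f+1}, converted to axis-angle and divided by the frame time. A small self-contained sketch of that per-joint step using SciPy's Rotation (the repo uses poselib's quaternion helpers; SciPy is used here only to keep the example runnable):

import numpy as np
from scipy.spatial.transform import Rotation as R

def dof_vel_between_frames(q0_xyzw, q1_xyzw, dt):
    # q0_xyzw, q1_xyzw: local joint rotations at frames f and f+1, (x, y, z, w) order.
    # Returns the 3-DoF angular velocity as a rotation vector (axis * angle) / dt.
    rel = R.from_quat(q0_xyzw).inv() * R.from_quat(q1_xyzw)
    return rel.as_rotvec() / dt

# Toy check: 90 degrees about z over one 30 fps frame -> about 47.1 rad/s about z.
q0 = np.array([0.0, 0.0, 0.0, 1.0])                   # identity
q1 = R.from_euler("z", 90, degrees=True).as_quat()    # 90-degree z rotation
print(dof_vel_between_frames(q0, q1, dt=1.0 / 30.0))  # ~[0, 0, 47.12]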
smpl_parser_n = SMPL_Parser(model_path=data_dir,
3
2023-10-31 20:47:12+00:00
24k
Improbable-AI/dexenv
dexenv/envs/dclaw_multiobjs.py
[ { "identifier": "DClawBase", "path": "dexenv/envs/dclaw_base.py", "snippet": "class DClawBase(VecTask):\n\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n\n self.cfg = cfg\n headless = self.cfg.headless\n self.randomize = self.cfg[\"task\"][\"randomize\"]\n if self.randomize:\n logger.warning(f'Domain randomization is enabled!')\n self.randomization_params = self.cfg[\"task\"][\"randomization_params\"]\n self.aggregate_mode = self.cfg[\"env\"][\"aggregateMode\"]\n\n self.dist_reward_scale = self.cfg[\"env\"][\"rew\"][\"distRewardScale\"]\n self.rot_reward_scale = self.cfg[\"env\"][\"rew\"][\"rotRewardScale\"]\n self.success_tolerance = self.cfg[\"env\"][\"rew\"][\"successTolerance\"]\n self.reach_goal_bonus = self.cfg[\"env\"][\"rew\"][\"reachGoalBonus\"]\n self.fall_dist = self.cfg[\"env\"][\"rew\"][\"fallDistance\"]\n self.fall_penalty = self.cfg[\"env\"][\"rew\"][\"fallPenalty\"]\n self.rot_eps = self.cfg[\"env\"][\"rew\"][\"rotEps\"]\n\n self.vel_obs_scale = 0.2 # scale factor of velocity based observations\n self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations\n\n self.reset_position_noise = self.cfg[\"env\"][\"resetPositionNoise\"]\n self.reset_rotation_noise = self.cfg[\"env\"][\"resetRotationNoise\"]\n self.reset_dof_pos_noise = self.cfg[\"env\"][\"resetDofPosRandomInterval\"]\n self.reset_dof_vel_noise = self.cfg[\"env\"][\"resetDofVelRandomInterval\"]\n\n self.force_scale = self.cfg[\"env\"].get(\"forceScale\", 0.0)\n self.force_prob_range = self.cfg[\"env\"].get(\"forceProbRange\", [0.001, 0.1])\n self.force_decay = self.cfg[\"env\"].get(\"forceDecay\", 0.99)\n self.force_decay_interval = self.cfg[\"env\"].get(\"forceDecayInterval\", 0.08)\n\n self.dclaw_dof_speed_scale = self.cfg[\"env\"][\"dofSpeedScale\"]\n # self.act_moving_average = self.cfg[\"env\"][\"actionsMovingAverage\"]\n\n self.debug_viz = self.cfg[\"env\"][\"enableDebugVis\"]\n\n self.max_episode_length = self.cfg[\"env\"][\"episodeLength\"]\n self.reset_time = self.cfg[\"env\"].get(\"resetTime\", -1.0)\n self.print_success_stat = self.cfg[\"env\"][\"printNumSuccesses\"]\n self.max_consecutive_successes = self.cfg[\"env\"][\"maxConsecutiveSuccesses\"]\n self.av_factor = self.cfg[\"env\"].get(\"averFactor\", 0.1)\n\n self.object_type = self.cfg[\"env\"][\"objectType\"]\n\n self.asset_files_dict = {\n \"block\": \"urdf/objects/cube_multicolor.urdf\",\n \"egg\": \"mjcf/open_ai_assets/hand/egg.xml\",\n \"airplane\": \"single_objects/airplane/model.urdf\",\n 'power_drill': 'single_objects/power_drill/model.urdf',\n 'mug': 'single_objects/mug/model.urdf',\n 'elephant': 'asymm/train/elephant/var_000/model.urdf',\n 'train': 'asymm/train/train/var_000/model.urdf',\n 'stanford_bunny': 'asymm/train/stanford_bunny/var_004/model.urdf'\n\n }\n self.objs_in_isaacgym = ['block', 'egg']\n\n if \"asset\" in self.cfg[\"env\"]:\n self.asset_files_dict[\"block\"] = self.cfg[\"env\"][\"asset\"].get(\"assetFileNameBlock\",\n self.asset_files_dict[\"block\"])\n self.asset_files_dict[\"egg\"] = self.cfg[\"env\"][\"asset\"].get(\"assetFileNameEgg\",\n self.asset_files_dict[\"egg\"])\n\n self.obs_type = self.cfg[\"env\"][\"observationType\"]\n\n if not (self.obs_type in [\"full_no_vel\", \"full\", \"full_state\"]):\n raise Exception(\n \"Unknown type of observations!\\nobservationType should be one of: [openai, full_no_vel, full, full_state]\")\n\n print(\"Obs type:\", self.obs_type)\n\n ## TODO: change value here\n self.num_obs_dict = {\n \"full_no_vel\": 42,\n \"full\": 87,\n 
\"full_state\": 114\n }\n\n self.up_axis = 'z'\n\n num_states = 0\n\n self.cfg[\"env\"][\"numObservations\"] = self.num_obs_dict[self.obs_type]\n self.cfg[\"env\"][\"numStates\"] = num_states\n self.cfg[\"env\"][\"numActions\"] = 12\n self.hist_buf_reset_env_ids = None\n\n super().__init__(config=self.cfg,\n sim_device=sim_device,\n rl_device=rl_device,\n graphics_device_id=graphics_device_id,\n headless=headless)\n\n self.dt = self.sim_params.dt\n control_freq_inv = self.cfg[\"env\"].get(\"controlFrequencyInv\", 1)\n if self.reset_time > 0.0:\n self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt)))\n print(\"Reset time: \", self.reset_time)\n print(\"New episode length: \", self.max_episode_length)\n\n if self.viewer != None:\n cam_pos = gymapi.Vec3(0.16, -0.5, 0.5)\n cam_target = gymapi.Vec3(0.0, 0.0, 0.15)\n self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)\n\n actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)\n dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)\n rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)\n dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)\n\n if self.obs_type == \"full_state\":\n sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)\n self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)\n\n dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)\n self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs,\n self.num_dclaw_dofs)\n\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.gym.refresh_dof_state_tensor(self.sim)\n if self.cfg.env.dof_torque_on:\n self.gym.refresh_dof_force_tensor(self.sim)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)\n self.dclaw_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_dclaw_dofs]\n self.dclaw_dof_pos = self.dclaw_dof_state[..., 0]\n self.dclaw_dof_vel = self.dclaw_dof_state[..., 1]\n if self.cfg.env.dof_torque_on:\n self.dclaw_dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, -1)\n else:\n self.dclaw_dof_torque = None\n\n self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)\n self.num_bodies = self.rigid_body_states.shape[1]\n\n self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)\n\n if self.cfg.env.rew.pen_tb_contact:\n _net_cf = self.gym.acquire_net_contact_force_tensor(self.sim)\n self.net_contact_force = gymtorch.wrap_tensor(_net_cf).view(self.num_envs, -1, 3)\n table_handle = self.gym.find_actor_handle(self.envs[0], 'table')\n self.table_body_index = self.gym.find_actor_rigid_body_index(self.envs[0],\n table_handle,\n 'table',\n gymapi.DOMAIN_ENV)\n logger.warning(f'Table body index:{self.table_body_index}')\n self.table_contact_force = self.net_contact_force[:, self.table_body_index]\n\n self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs\n self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)\n self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)\n\n self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)\n\n self.reset_goal_buf = self.reset_buf.clone()\n self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)\n 
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)\n\n self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)\n\n self.total_successes = 0\n self.total_resets = 0\n\n self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)\n self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)\n self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))\n * torch.rand(self.num_envs, device=self.device) + torch.log(\n self.force_prob_range[1]))\n\n self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)\n\n self.num_actions = self.num_dclaw_dofs\n self.actions = self.zero_actions()\n DClawBase.compute_observations(self)\n self.num_observations = self.obs_buf.shape[-1]\n self.cfg.env.numObservations = self.num_observations\n self.create_ob_act_space()\n\n def create_sim(self):\n self.dt = self.cfg[\"sim\"][\"dt\"]\n self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, self.up_axis)\n\n self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)\n self._create_ground_plane()\n self._create_envs(self.num_envs, self.cfg[\"env\"]['envSpacing'], int(np.sqrt(self.num_envs)))\n\n if self.randomize:\n self.apply_randomizations(self.randomization_params)\n\n def _create_ground_plane(self):\n plane_params = gymapi.PlaneParams()\n plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)\n plane_params.distance = 0.1\n self.gym.add_ground(self.sim, plane_params)\n\n def _create_envs(self, num_envs, spacing, num_per_row):\n lower = gymapi.Vec3(-spacing, -spacing, 0.0)\n upper = gymapi.Vec3(spacing, spacing, spacing)\n\n asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix()\n object_asset_file = self.asset_files_dict[self.object_type]\n\n dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root)\n table_asset = self.get_table_asset()\n table_pose = self.get_table_pose()\n\n if self.obs_type == \"full_state\":\n sensor_pose = gymapi.Transform()\n for ft_handle in self.fingertip_handles:\n self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose)\n\n if self.object_type in self.objs_in_isaacgym:\n asset_root = get_module_path('isaacgymenvs').parent.joinpath('assets').as_posix()\n else:\n asset_root = dexenv.LIB_PATH.joinpath('assets').as_posix()\n\n object_asset_options = gymapi.AssetOptions()\n if self.cfg.env.vhacd:\n object_asset_options.convex_decomposition_from_submeshes = True\n\n object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)\n\n object_asset_options.disable_gravity = True\n goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)\n\n dclaw_start_pose = self.get_dclaw_start_pose()\n object_start_pose = self.get_object_start_pose(dclaw_start_pose)\n\n goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose)\n\n self.dclaws = []\n self.envs = []\n\n self.object_init_state = []\n self.hand_start_states = []\n\n self.hand_indices = []\n self.fingertip_indices = []\n self.object_indices = []\n self.goal_object_indices = []\n\n self.render_camera_handles = []\n if self.cfg.rgb_render:\n render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()\n\n self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in\n self.fingertips]\n 
print(f'Fingertip handles:{self.fingertip_handles}')\n\n dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)\n object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)\n object_rs_count = self.gym.get_asset_rigid_shape_count(object_asset)\n self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))\n self.object_handles = []\n\n max_agg_bodies = self.num_dclaw_bodies + 2 * object_rb_count + 1\n max_agg_shapes = self.num_dclaw_shapes + 2 * object_rs_count + 1\n\n for i in range(self.num_envs):\n env_ptr = self.gym.create_env(\n self.sim, lower, upper, num_per_row\n )\n\n if self.aggregate_mode >= 1:\n self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)\n\n self.create_hand_actor(env_ptr=env_ptr,\n dclaw_asset=dclaw_asset,\n dclaw_start_pose=dclaw_start_pose,\n dclaw_dof_props=dclaw_dof_props,\n env_id=i)\n\n object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, \"object\", i, 0, 1)\n self.object_handles.append(object_handle)\n self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,\n object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z,\n object_start_pose.r.w,\n 0, 0, 0, 0, 0, 0])\n object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)\n self.object_indices.append(object_idx)\n\n goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, \"goal_object\", i + self.num_envs,\n 0, 2)\n goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)\n self.goal_object_indices.append(goal_object_idx)\n\n if self.cfg.env.blockscale is not None and self.cfg.env.objectType == 'block':\n blockscale = float(self.cfg.env.blockscale)\n self.gym.set_actor_scale(env_ptr, object_handle, blockscale)\n self.gym.set_actor_scale(env_ptr, goal_handle, blockscale)\n\n if self.object_type != \"block\":\n self.gym.set_rigid_body_color(\n env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))\n self.gym.set_rigid_body_color(\n env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))\n table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, \"table\", i, 0)\n\n if self.cfg.rgb_render:\n render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)\n self.render_camera_handles.append(render_camera_handle[0])\n\n if self.aggregate_mode > 0:\n self.gym.end_aggregate(env_ptr)\n\n self.envs.append(env_ptr)\n\n self.setup_torch_states()\n\n def create_camera(self, camera_poses, env_ptr, camera_params):\n cam_handles = []\n for ic in range(min(len(camera_poses), self.cfg.cam.cam_num)):\n camera_handle = self.gym.create_camera_sensor(env_ptr, camera_params)\n if isinstance(camera_poses[ic], tuple):\n self.gym.set_camera_location(camera_handle, env_ptr, camera_poses[ic][0], camera_poses[ic][1])\n else:\n self.gym.set_camera_transform(camera_handle, env_ptr, camera_poses[ic])\n cam_handles.append(camera_handle)\n return cam_handles\n\n def get_visual_render_camera_setup(self):\n cam_pos = np.array([-0.7, 0, 0.5])\n cam_focus_pt = np.array([0.08, 0, 0.15])\n cam_focus_pt = gymapi.Vec3(*cam_focus_pt)\n cam_pos = gymapi.Vec3(*cam_pos)\n camera_poses = [(cam_pos, cam_focus_pt)]\n camera_params = get_camera_params(width=self.cfg.cam.visual_render_width,\n height=self.cfg.cam.visual_render_height,\n hov=45,\n cuda=False)\n return camera_poses, camera_params\n\n def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, 
dclaw_dof_props, env_id):\n dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, \"hand\", env_id, 0, 0)\n if self.cfg.env.dof_torque_on:\n self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)\n self.hand_start_states.append(\n [dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z,\n dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z,\n dclaw_start_pose.r.w,\n 0, 0, 0, 0, 0, 0])\n self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props)\n hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM)\n self.hand_indices.append(hand_idx)\n\n self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL)\n if self.obs_type == \"full_state\":\n self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)\n self.dclaws.append(dclaw_actor)\n self.set_hand_color(env_ptr, dclaw_actor)\n\n def set_hand_color(self, env_ptr, dclaw_actor):\n rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, dclaw_actor)\n for bd, bd_id in rgd_dict.items():\n if bd not in dclaw_body_color_mapping:\n continue\n color = gymapi.Vec3(*dclaw_body_color_mapping[bd])\n self.gym.set_rigid_body_color(env_ptr, dclaw_actor,\n bd_id, gymapi.MESH_VISUAL,\n color)\n\n def get_table_asset(self):\n asset_options = gymapi.AssetOptions()\n asset_options.armature = 0.001\n asset_options.fix_base_link = True\n asset_options.thickness = 0.001\n asset_options.disable_gravity = True\n table_dims = gymapi.Vec3(0.6, 0.6, 0.1)\n table_asset = self.gym.create_box(self.sim,\n table_dims.x,\n table_dims.y,\n table_dims.z,\n asset_options)\n table_props = self.gym.get_asset_rigid_shape_properties(table_asset)\n for p in table_props:\n p.friction = self.cfg.env.table.friction\n p.torsion_friction = self.cfg.env.table.torsion_friction\n p.restitution = self.cfg.env.table.restitution\n p.rolling_friction = self.cfg.env.table.rolling_friction\n self.gym.set_asset_rigid_shape_properties(table_asset, table_props)\n return table_asset\n\n def get_table_pose(self):\n object_start_pose = gymapi.Transform()\n object_start_pose.p = gymapi.Vec3()\n object_start_pose.p.x = 0\n object_start_pose.p.y = 0\n object_start_pose.p.z = -0.05\n return object_start_pose\n\n def get_dclaw_start_pose(self):\n dclaw_start_pose = gymapi.Transform()\n dclaw_start_pose.p = gymapi.Vec3(*get_axis_params(0.25, self.up_axis_idx))\n dclaw_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi)\n return dclaw_start_pose\n\n def setup_torch_states(self):\n self.render_rgb_obs_buf = None\n if self.cfg.rgb_render:\n self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9),\n gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0, 0, 0))\n else:\n self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9),\n gymapi.Vec3(0.7, 0.7, 0.7), gymapi.Vec3(0, 0, 0))\n self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(\n self.num_envs, 13)\n self.goal_states = self.object_init_state.clone()\n self.goal_states[:, self.up_axis_idx] -= 0.04\n self.goal_init_state = self.goal_states.clone()\n self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13)\n\n self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device)\n self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)\n self.object_rb_masses = None\n self.update_obj_mass()\n self.hand_indices = to_torch(self.hand_indices, 
dtype=torch.long, device=self.device)\n self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device)\n self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)\n\n def get_dclaw_asset(self, asset_root=None, asset_options=None):\n # load dclaw asset\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n asset_options.flip_visual_attachments = False\n asset_options.fix_base_link = True\n asset_options.collapse_fixed_joints = False\n asset_options.disable_gravity = False\n asset_options.thickness = 0.001\n asset_options.angular_damping = 0.01\n asset_options.override_inertia = True\n asset_options.override_com = True\n logger.info(f'VHACD:{self.cfg.env.vhacd}')\n if self.cfg.env.vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n if self.cfg.physics_engine == \"physx\":\n # if self.physics_engine == gymapi.SIM_PHYSX:\n asset_options.use_physx_armature = True\n asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS\n\n if asset_root is None:\n asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw_4f').as_posix()\n robot_name = self.cfg.env.robot\n asset_root = pathlib_file(asset_root).parent.joinpath(f'{robot_name}').as_posix()\n dclaw_asset = self.gym.load_asset(self.sim, asset_root, f\"{robot_name}.urdf\", asset_options)\n print(f'Dclaw asset root:{asset_root} robot name:{robot_name}')\n\n self.num_dclaw_bodies = self.gym.get_asset_rigid_body_count(dclaw_asset)\n self.num_dclaw_shapes = self.gym.get_asset_rigid_shape_count(dclaw_asset)\n self.num_dclaw_dofs = self.gym.get_asset_dof_count(dclaw_asset)\n\n print(f'D-Claw:')\n print(f'\\t Number of bodies: {self.num_dclaw_bodies}')\n print(f'\\t Number of shapes: {self.num_dclaw_shapes}')\n print(f'\\t Number of dofs: {self.num_dclaw_dofs}')\n\n self.dclaw_asset_dof_dict = self.gym.get_asset_dof_dict(dclaw_asset)\n joint_names = self.dclaw_asset_dof_dict.keys()\n logger.info(f'Joint names:{joint_names}')\n\n self.dof_joint_indices = list(self.dclaw_asset_dof_dict.values())\n dinds = np.array(self.dof_joint_indices)\n assert np.all(np.diff(dinds) > 0) # check if it's in a sorted order (ascending)\n\n rb_links = self.gym.get_asset_rigid_body_names(dclaw_asset)\n self.fingertips = [x for x in rb_links if 'tip_link' in x] # [\"one_tip_link\", \"two_tip_link\", \"three_tip_link\"]\n self.num_fingertips = len(self.fingertips)\n\n print(f'Number of fingertips:{self.num_fingertips} Fingertips:{self.fingertips}')\n\n print(f'Actuator --- DoF Index')\n for act_name, act_index in zip(joint_names, self.dof_joint_indices):\n print(f'\\t {act_name} {act_index}')\n\n dclaw_dof_props = self.gym.get_asset_dof_properties(dclaw_asset)\n\n def set_dof_prop(props, prop_name, val):\n if np.isscalar(val):\n props[prop_name].fill(val)\n elif len(val) == 3:\n props[prop_name] = np.array(list(val) * int(len(props[prop_name]) / 3))\n else:\n props[prop_name] = np.array(val)\n\n if self.cfg[\"env\"][\"dof_vel_hard_limit\"] is not None:\n vel_hard_limit = self.cfg[\"env\"][\"dof_vel_hard_limit\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_dof_vel_hard_limit\"]\n print(f'Setting DOF velocity limit to:{vel_hard_limit}')\n set_dof_prop(dclaw_dof_props, 'velocity', vel_hard_limit)\n if self.cfg[\"env\"][\"effort_limit\"] is not None:\n effort_limit = self.cfg[\"env\"][\"effort_limit\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_effort_limit\"]\n print(f'Setting DOF effort limit to:{effort_limit}')\n set_dof_prop(dclaw_dof_props, 
'effort', effort_limit)\n if self.cfg[\"env\"][\"stiffness\"] is not None:\n stiffness = self.cfg[\"env\"][\"stiffness\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_stiffness\"]\n print(f'Setting stiffness to:{stiffness}')\n set_dof_prop(dclaw_dof_props, 'stiffness', stiffness)\n if self.cfg[\"env\"][\"damping\"] is not None:\n damping = self.cfg[\"env\"][\"damping\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_damping\"]\n print(f'Setting damping to:{damping}')\n set_dof_prop(dclaw_dof_props, 'damping', damping)\n\n self.dclaw_dof_lower_limits = []\n self.dclaw_dof_upper_limits = []\n\n self.dclaw_default_dof_states = np.zeros(self.num_dclaw_dofs, dtype=gymapi.DofState.dtype)\n self.dclaw_default_dof_pos = self.dclaw_default_dof_states['pos']\n self.dclaw_default_dof_vel = self.dclaw_default_dof_states['vel']\n for i in range(self.num_dclaw_dofs):\n self.dclaw_dof_lower_limits.append(dclaw_dof_props['lower'][i])\n self.dclaw_dof_upper_limits.append(dclaw_dof_props['upper'][i])\n if i % 3 == 1:\n self.dclaw_default_dof_pos[i] = 0.8\n elif i % 3 == 2:\n self.dclaw_default_dof_pos[i] = -1.1\n else:\n self.dclaw_default_dof_pos[i] = 0.\n self.dclaw_default_dof_vel[i] = 0.0\n\n self.dof_joint_indices = to_torch(self.dof_joint_indices, dtype=torch.long, device=self.device)\n self.dclaw_dof_lower_limits = to_torch(self.dclaw_dof_lower_limits, device=self.device)\n self.dclaw_dof_upper_limits = to_torch(self.dclaw_dof_upper_limits, device=self.device)\n self.dclaw_default_dof_pos = to_torch(self.dclaw_default_dof_pos, device=self.device)\n self.dclaw_default_dof_vel = to_torch(self.dclaw_default_dof_vel, device=self.device)\n\n self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in\n self.fingertips]\n\n dclaw_asset_props = self.gym.get_asset_rigid_shape_properties(dclaw_asset)\n for p in dclaw_asset_props:\n p.friction = self.cfg.env.hand.friction\n p.torsion_friction = self.cfg.env.hand.torsion_friction\n p.rolling_friction = self.cfg.env.hand.rolling_friction\n p.restitution = self.cfg.env.hand.restitution\n self.gym.set_asset_rigid_shape_properties(dclaw_asset, dclaw_asset_props)\n return dclaw_asset, dclaw_dof_props\n\n def get_object_start_pose(self, dclaw_start_pose):\n object_start_pose = gymapi.Transform()\n object_start_pose.p = gymapi.Vec3()\n if self.cfg.env.obj_init_delta_pos is not None:\n delta_pos = self.cfg.env.obj_init_delta_pos\n object_start_pose.p.x = dclaw_start_pose.p.x + delta_pos[0]\n object_start_pose.p.y = dclaw_start_pose.p.y + delta_pos[1]\n object_start_pose.p.z = dclaw_start_pose.p.z + delta_pos[2]\n else:\n object_start_pose.p.x = dclaw_start_pose.p.x\n pose_dy, pose_dz = 0., -0.13\n object_start_pose.p.y = dclaw_start_pose.p.y + pose_dy\n object_start_pose.p.z = dclaw_start_pose.p.z + pose_dz\n return object_start_pose\n\n def get_goal_object_start_pose(self, object_start_pose):\n self.goal_displacement = gymapi.Vec3(0., 0, 0.25)\n self.goal_displacement_tensor = to_torch(\n [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device)\n goal_start_pose = gymapi.Transform()\n goal_start_pose.p = object_start_pose.p + self.goal_displacement\n return goal_start_pose\n\n def set_dof_props(self, props_dict):\n param_setters_map = get_property_setter_map(self.gym)\n param_getters_map = get_property_getter_map(self.gym)\n prop_name = 'dof_properties'\n setter = param_setters_map[prop_name]\n for env_id in range(len(self.envs)):\n env = self.envs[env_id]\n 
handle = self.gym.find_actor_handle(env, 'hand')\n prop = param_getters_map[prop_name](env, handle)\n for dof_prop_name, dof_prop_values in props_dict.items():\n if env_id == 0:\n assert len(dof_prop_values) == len(self.envs)\n prop_val = dof_prop_values[env_id]\n prop[dof_prop_name].fill(prop_val)\n success = setter(env, handle, prop)\n if not success:\n logger.warning(f'Setting dof properties is not successful!')\n\n def update_obj_mass(self, env_ids=None):\n object_rb_masses = []\n env_pool = env_ids if env_ids is not None else list(range(self.num_envs))\n if len(env_pool) < 1:\n return\n for env_id, object_handle in zip(env_pool, self.object_handles):\n env_ptr = self.envs[env_id]\n object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)\n object_rb_masses.append([prop.mass for prop in object_rb_props])\n if self.object_rb_masses is None:\n self.object_rb_masses = to_torch(object_rb_masses, dtype=torch.float, device=self.device)\n else:\n self.object_rb_masses[env_pool] = to_torch(object_rb_masses, dtype=torch.float, device=self.device)\n\n def reset(self) -> torch.Tensor:\n \"\"\"Reset the environment.\n Returns:\n Observation dictionary\n \"\"\"\n zero_actions = self.zero_actions()\n self.reset_buf.fill_(1)\n self.reset_goal_buf.fill_(1)\n if self.cfg.env.action_ema is not None:\n self.action_ema_val = zero_actions.clone()\n # step the simulator\n\n self.step(zero_actions)\n\n return self.update_obs()\n\n def compute_reward(self, actions):\n res = compute_dclaw_reward(\n self.reset_buf, self.reset_goal_buf, self.progress_buf,\n self.successes, self.max_episode_length,\n self.object_pos, self.object_rot, self.goal_pos, self.goal_rot,\n self.cfg['env']['rew'], self.actions,\n self.fingertip_pos, self.fingertip_vel, self.object_linvel, self.object_angvel,\n self.dclaw_dof_vel, self.dclaw_dof_torque,\n table_cf=self.table_contact_force if self.cfg.env.rew.pen_tb_contact else None\n )\n self.rew_buf[:] = res[0] * self.cfg.env.rew.rew_scale\n self.done_buf[:] = res[1]\n self.reset_buf[:] = res[2]\n self.reset_goal_buf[:] = res[3]\n self.progress_buf[:] = res[4]\n self.successes[:] = res[5]\n abs_rot_dist = res[6]\n reward_terms = res[7]\n timeout_envs = res[8]\n\n self.extras['success'] = self.reset_goal_buf.detach().to(self.rl_device).flatten()\n self.extras['abs_dist'] = abs_rot_dist.detach().to(self.rl_device)\n self.extras['TimeLimit.truncated'] = timeout_envs.detach().to(self.rl_device)\n for reward_key, reward_val in reward_terms.items():\n self.extras[reward_key] = reward_val.detach()\n\n def get_images(self):\n rgb = self.render_rgb_obs_buf\n return rgb\n\n def compute_observations(self):\n self.gym.refresh_dof_state_tensor(self.sim)\n if self.cfg.env.dof_torque_on:\n self.gym.refresh_dof_force_tensor(self.sim)\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n if self.obs_type == \"full_state\":\n self.gym.refresh_force_sensor_tensor(self.sim)\n self.gym.refresh_dof_force_tensor(self.sim)\n\n if self.cfg.env.rew.pen_tb_contact:\n self.gym.refresh_net_contact_force_tensor(self.sim)\n\n self.object_pose = self.root_state_tensor[self.object_indices, 0:7]\n self.object_pos = self.root_state_tensor[self.object_indices, 0:3]\n self.object_rot = self.root_state_tensor[self.object_indices, 3:7]\n self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]\n self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]\n\n self.goal_pose = self.goal_states[:, 0:7]\n self.goal_pos = 
self.goal_states[:, 0:3]\n self.goal_rot = self.goal_states[:, 3:7]\n\n self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13]\n self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3]\n self.fingertip_vel = self.rigid_body_states[:, self.fingertip_handles][:, :, 7:13]\n\n if self.obs_type == \"full_no_vel\":\n obs_buf = self.compute_full_observations(no_vel=True)\n elif self.obs_type == \"full\":\n obs_buf = self.compute_full_observations()\n elif self.obs_type == \"full_state\":\n obs_buf = self.compute_full_state()\n else:\n print(\"Unkown observations type!\")\n self.obs_buf = obs_buf\n\n if self.cfg.rgb_render:\n self.gym.fetch_results(self.sim, True)\n self.gym.step_graphics(self.sim)\n self.gym.render_all_camera_sensors(self.sim)\n self.gym.start_access_image_tensors(self.sim)\n self.render_rgb_obs_buf = self.get_numpy_rgb_images(self.render_camera_handles)\n self.gym.end_access_image_tensors(self.sim)\n\n def allocate_ob_buffers(self):\n self.obs_buf = torch.zeros(\n (self.num_envs, self.num_obs), device=self.device, dtype=torch.float)\n\n def compute_full_observations(self, no_vel=False):\n scaled_dof_pos = unscale(\n self.dclaw_dof_pos,\n self.dclaw_dof_lower_limits,\n self.dclaw_dof_upper_limits\n )\n quat_dist = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))\n\n if no_vel:\n out = torch.cat(\n [\n scaled_dof_pos,\n self.object_pose,\n self.goal_rot,\n quat_dist,\n self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips),\n self.actions\n ],\n dim=-1\n )\n else:\n out = torch.cat(\n [\n scaled_dof_pos,\n self.vel_obs_scale * self.dclaw_dof_vel,\n self.object_pose,\n self.object_linvel,\n self.vel_obs_scale * self.object_angvel,\n self.goal_rot,\n quat_dist,\n self.fingertip_state.reshape(self.num_envs, 13 * self.num_fingertips),\n self.actions\n ],\n dim=-1\n )\n return out\n\n def compute_full_state(self):\n obs_buf = self.compute_full_observations()\n obs_no_actions = obs_buf[:, :-9]\n actions = obs_buf[:, -9:]\n out = torch.cat(\n [\n obs_no_actions,\n self.force_torque_obs_scale * self.dof_force_tensor,\n self.force_torque_obs_scale * self.vec_sensor_tensor,\n actions\n ],\n dim=-1\n )\n\n return out\n\n def update_obs(self):\n if self.randomize:\n self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)\n\n self.obs_dict[\"ob\"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n if self.num_states > 0:\n self.obs_dict[\"state\"] = self.get_state()\n return self.obs_dict\n\n def reset_target_pose(self, env_ids, apply_reset=False):\n new_rot = random_quaternions(num=len(env_ids), device=self.device, order='xyzw')\n\n self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]\n self.goal_states[env_ids, 3:7] = new_rot\n self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor\n self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]\n self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(\n self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])\n\n if apply_reset:\n goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_state_tensor),\n gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))\n self.reset_goal_buf[env_ids] = 0\n\n def reset_idx(self, env_ids, goal_env_ids):\n if 
self.randomize and not self.cfg.env.rand_once:\n self.apply_randomizations(self.randomization_params)\n\n rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_dclaw_dofs * 2 + 3), device=self.device)\n\n self.reset_target_pose(env_ids)\n self.rb_forces[env_ids, :, :] = 0.0\n\n self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()\n self.root_state_tensor[self.object_indices[env_ids], 0:3] = self.object_init_state[env_ids, 0:3] + \\\n self.reset_position_noise * rand_floats[:, 0:3]\n\n new_object_rot = random_quaternions(num=len(env_ids), device=self.device, order='xyzw')\n\n self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot\n self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(\n self.root_state_tensor[self.object_indices[env_ids], 7:13])\n\n object_indices = torch.unique(torch.cat([self.object_indices[env_ids],\n self.goal_object_indices[env_ids],\n self.goal_object_indices[goal_env_ids]]).to(torch.int32))\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_state_tensor),\n gymtorch.unwrap_tensor(object_indices), len(object_indices))\n self.random_force_prob[env_ids] = torch.exp(\n (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))\n * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))\n\n delta_max = self.dclaw_dof_upper_limits - self.dclaw_default_dof_pos\n delta_min = self.dclaw_dof_lower_limits - self.dclaw_default_dof_pos\n rand_delta = delta_min + (delta_max - delta_min) * rand_floats[:, 3:3 + self.num_dclaw_dofs]\n\n pos = self.dclaw_default_dof_pos + self.reset_dof_pos_noise * rand_delta\n self.dclaw_dof_pos[env_ids, :] = pos\n self.dclaw_dof_vel[env_ids, :] = self.dclaw_default_dof_vel + \\\n self.reset_dof_vel_noise * rand_floats[:,\n 3 + self.num_dclaw_dofs:3 + self.num_dclaw_dofs * 2]\n self.prev_targets[env_ids, :self.num_dclaw_dofs] = pos\n self.cur_targets[env_ids, :self.num_dclaw_dofs] = pos\n\n hand_indices = self.hand_indices[env_ids].to(torch.int32)\n self.gym.set_dof_position_target_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.prev_targets),\n gymtorch.unwrap_tensor(hand_indices), len(env_ids))\n self.gym.set_dof_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.dof_state),\n gymtorch.unwrap_tensor(hand_indices), len(env_ids))\n\n self.progress_buf[env_ids] = 0\n self.reset_buf[env_ids] = 0\n self.successes[env_ids] = 0\n\n def get_numpy_rgb_images(self, camera_handles):\n rgb_obs_buf = []\n for cam_handles, env in zip(camera_handles, self.envs):\n cam_ob = []\n if isinstance(cam_handles, list):\n for cam_handle in cam_handles:\n color_image = self.gym.get_camera_image(self.sim, env, cam_handle, gymapi.IMAGE_COLOR)\n color_image = color_image.reshape(color_image.shape[0], -1, 4)[..., :3]\n cam_ob.append(color_image)\n rgb_obs_buf.append(cam_ob)\n else:\n color_image = self.gym.get_camera_image(self.sim, env, cam_handles, gymapi.IMAGE_COLOR)\n color_image = color_image.reshape(color_image.shape[0], -1, 4)[..., :3]\n rgb_obs_buf.append(color_image)\n rgb_obs_buf = np.stack(rgb_obs_buf)\n return rgb_obs_buf\n\n def pre_physics_step(self, actions):\n env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)\n goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)\n\n if len(goal_env_ids) > 0 and len(env_ids) == 0:\n self.reset_target_pose(goal_env_ids, apply_reset=True)\n elif len(goal_env_ids) > 0:\n 
self.reset_target_pose(goal_env_ids)\n\n if len(env_ids) > 0:\n self.reset_idx(env_ids, goal_env_ids)\n\n self.actions = actions.clone().to(self.device)\n\n if self.cfg.env.action_ema is not None:\n self.action_ema_val[env_ids] = 0\n self.action_ema_val[goal_env_ids] = 0\n self.actions = self.actions * self.cfg.env.action_ema + self.action_ema_val * (1 - self.cfg.env.action_ema)\n self.action_ema_val = self.actions.clone()\n if self.cfg.env.dof_vel_pol_limit is not None:\n delta_action = self.actions * self.cfg.env.dof_vel_pol_limit * (self.dt * self.cfg.env.controlFrequencyInv)\n else:\n delta_action = self.dclaw_dof_speed_scale * self.dt * self.actions\n if self.cfg.env.relativeToPrevTarget:\n targets = self.prev_targets[:, self.dof_joint_indices] + delta_action\n else:\n targets = self.dclaw_dof_pos + delta_action\n\n self.cur_targets[:, self.dof_joint_indices] = tensor_clamp(targets,\n self.dclaw_dof_lower_limits[\n self.dof_joint_indices],\n self.dclaw_dof_upper_limits[\n self.dof_joint_indices])\n\n self.prev_targets[:, self.dof_joint_indices] = self.cur_targets[:, self.dof_joint_indices]\n self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))\n\n if self.force_scale > 0.0:\n self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)\n # apply new forces\n force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()\n rb_force_shape = self.rb_forces[force_indices, self.object_rb_handles, :].shape\n rb_force_dir = torch.randn(rb_force_shape, device=self.device)\n rb_force_dir = rb_force_dir / rb_force_dir.norm(dim=-1, keepdim=True)\n self.rb_forces[force_indices, self.object_rb_handles, :] = rb_force_dir * self.object_rb_masses[force_indices] * self.force_scale\n self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None,\n gymapi.LOCAL_SPACE)\n\n def post_physics_step(self):\n self.progress_buf += 1\n self.randomize_buf += 1\n\n self.compute_observations()\n self.compute_reward(self.actions)\n\n if self.viewer and self.debug_viz:\n # draw axes on target object\n self.gym.clear_lines(self.viewer)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n for i in range(self.num_envs):\n targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()\n targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()\n targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()\n\n p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])\n\n objectx = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()\n objecty = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()\n objectz = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()\n\n p0 = self.object_pos[i].cpu().numpy()\n 
self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])" }, { "identifier": "chunker_list", "path": "dexenv/utils/common.py", "snippet": "def chunker_list(seq_list, nchunks):\n # split the list into n parts/chunks\n return [seq_list[i::nchunks] for i in range(nchunks)]" }, { "identifier": "get_all_files_with_name", "path": "dexenv/utils/common.py", "snippet": "def get_all_files_with_name(directory, name,\n exclude_patterns=None,\n include_patterns=None,\n sort=True,\n ):\n directory = pathlib_file(directory)\n files = directory.glob(f'**/{name}')\n files = [x for x in files if x.is_file() and x.name == name]\n if exclude_patterns is not None:\n files = filter_with_exclude_patterns(files, exclude_patterns)\n if include_patterns is not None:\n files = filter_with_include_patterns(files, include_patterns)\n if sort:\n files = sorted(files)\n return files" }, { "identifier": "load_from_pickle", "path": "dexenv/utils/common.py", "snippet": "def load_from_pickle(file_name):\n file_name = pathlib_file(file_name)\n with file_name.open('rb') as f:\n data = pkl.load(f)\n return data" }, { "identifier": "load_a_goal_object_asset", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_a_goal_object_asset(gym, sim, asset_root, object_urdf, asset_options=None, vhacd=True):\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n if vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n asset_options.thickness = 0.001\n asset_options.disable_gravity = True\n asset_options.override_inertia = True\n # asset_options.override_com = True\n\n rela_file = object_urdf.relative_to(asset_root).as_posix()\n obj_asset = gym.load_asset(sim,\n asset_root.as_posix(),\n rela_file,\n asset_options)\n return obj_asset" }, { "identifier": "load_an_object_asset", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_an_object_asset(gym, sim, asset_root, object_urdf, asset_options=None, vhacd=True):\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n asset_options.thickness = 0.001\n asset_options.override_inertia = True\n # asset_options.override_com = True\n if vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n rela_file = object_urdf.relative_to(asset_root).as_posix()\n obj_asset = gym.load_asset(sim,\n asset_root.as_posix(),\n rela_file,\n asset_options)\n return obj_asset" }, { "identifier": "load_obj_texture", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_obj_texture(gym, sim, object_urdf):\n texture_files = get_all_files_with_suffix(object_urdf.parent, 'png')\n num_textures = len(texture_files)\n if num_textures > 1:\n logger.warning(f'Multiple image files exist, will use the first image as the texture!')\n elif num_textures == 0:\n raise RuntimeError(f'No texture file is found!')\n texture_file = texture_files[0]\n texture_handle = gym.create_texture_from_file(sim,\n texture_file.as_posix(),\n )\n return texture_handle" } ]
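chunker_list in the utility snippets above splits a sequence into n round-robin chunks (seq_list[i::nchunks] for each i); it is imported by the multi-object environment below. A quick usage sketch with hypothetical file names:

from dexenv.utils.common import chunker_list

urdfs = [f"model_{i}.urdf" for i in range(10)]  # hypothetical object files
chunks = chunker_list(urdfs, 3)                 # 3 round-robin chunks

print(chunks[0])  # ['model_0.urdf', 'model_3.urdf', 'model_6.urdf', 'model_9.urdf']
print(chunks[1])  # ['model_1.urdf', 'model_4.urdf', 'model_7.urdf']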
import numpy as np
import torch
import dexenv
from gym.utils import seeding
from isaacgym import gymapi
from loguru import logger
from tqdm import tqdm
from dexenv.envs.dclaw_base import DClawBase
from dexenv.utils.common import chunker_list
from dexenv.utils.common import get_all_files_with_name
from dexenv.utils.common import load_from_pickle
from dexenv.utils.isaac_utils import load_a_goal_object_asset
from dexenv.utils.isaac_utils import load_an_object_asset
from dexenv.utils.isaac_utils import load_obj_texture
15,462
            # add object
            object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1)
            self.object_handles.append(object_handle)
            self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0])
            object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
            self.object_indices.append(object_idx)
            self.object_cat_indices.append(object_cat_ids[obj_asset_id])
            # add goal object
            goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2)
            goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
            self.goal_object_indices.append(goal_object_idx)
            if self.cfg.obj.load_texture:
                self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id])
                self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id])
            else:
                color = np.array([179, 193, 134]) / 255.0
                self.gym.set_rigid_body_color(env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color))
                self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color))
            table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0)
            self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.))
            if self.cfg.rgb_render:
                render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)
                self.render_camera_handles.append(render_camera_handle[0])
            if self.aggregate_mode > 0:
                self.gym.end_aggregate(env_ptr)
            self.envs.append(env_ptr)

        object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
        self.object_rb_masses = [prop.mass for prop in object_rb_props]
        self.setup_torch_states()
        self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1)
        self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1)

    def parse_obj_dataset(self, dataset):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        split_dataset_name = dataset.split(':')
        if len(split_dataset_name) == 1:
            dataset_path = asset_root.joinpath(dataset, 'train')
        else:
            target_object = split_dataset_name[1]
            dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object)
        logger.warning(f'Dataset path:{dataset_path}')
        urdf_files = get_all_files_with_name(dataset_path, name='model.urdf')
        permute_ids = self.np_random.permutation(np.arange(len(urdf_files)))
        permuted_urdfs = [urdf_files[i] for i in permute_ids]
        object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs])))
        obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)}
        return permuted_urdfs, dataset_path, obj_name_to_id

    def get_object_category(self, urdf_path):
        cat = urdf_path.parents[0].name
        if 'var_' in cat:
            cat = urdf_path.parents[1].name
        return cat

    def load_object_asset(self):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        object_urdfs = self.object_urdfs
        object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], []
        object_cat_ids = []
        if self.cfg.obj.object_id is not None:
            urdf_to_load = self.object_urdfs[self.cfg.obj.object_id]
            logger.info(f'Loading a single object: {urdf_to_load}')
            obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
            object_assets.append(obj_asset)
            goal_assets.append(goal_asset)
            object_ids.append(self.object_urdfs.index(urdf_to_load))
            object_tex_handles.append(texture_handle)
            object_ptds.append(ptd)
            object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        else:
            if self.cfg.obj.start_id is None:
                start = 0
                end = min(len(object_urdfs), self.cfg.obj.num_objs)
            else:
                start = self.cfg.obj.start_id
                end = min(start + self.cfg.obj.num_objs, len(object_urdfs))
            iters = range(start, end)
            logger.info(f'Loading object IDs from {start} to {end}.')
            for idx in tqdm(iters, desc='Loading Asset'):
                urdf_to_load = object_urdfs[idx]
                obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
                object_assets.append(obj_asset)
                goal_assets.append(goal_asset)
                object_ids.append(self.object_urdfs.index(urdf_to_load))
                object_tex_handles.append(texture_handle)
                object_ptds.append(ptd)
                object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids

    def load_an_object(self, asset_root, object_urdf):
        out = []
class DclawMultiObjs(DClawBase):
    def __init__(self, cfg, sim_device, rl_device, graphics_device_id):
        self.set_random_gen()
        self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset)
        self.num_objects = len(self.object_urdfs)
        logger.info(f'Object urdf root path:{self.dataset_path}.')
        logger.info(f'Number of available objects:{self.num_objects}.')
        super().__init__(cfg=cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id)

    def set_random_gen(self, seed=12345):
        self.np_random, seed = seeding.np_random(seed)

    def _create_envs(self, num_envs, spacing, num_per_row):
        lower = gymapi.Vec3(-spacing, -spacing, 0.0)
        upper = gymapi.Vec3(spacing, spacing, spacing)
        asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix()
        dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root)

        # load manipulated object and goal assets
        table_asset = self.get_table_asset()
        table_pose = self.get_table_pose()
        object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset()

        # create fingertip force sensors, if needed
        if self.obs_type == "full_state":
            sensor_pose = gymapi.Transform()
            for ft_handle in self.fingertip_handles:
                self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose)

        dclaw_start_pose = self.get_dclaw_start_pose()
        object_start_pose = self.get_object_start_pose(dclaw_start_pose)
        goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose)

        self.dclaws = []
        self.envs = []
        self.object_init_state = []
        self.hand_start_states = []
        self.hand_indices = []
        self.fingertip_indices = []
        self.object_indices = []
        self.object_cat_indices = []
        self.goal_object_indices = []
        self.render_camera_handles = []
        if self.cfg.rgb_render:
            render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()

        self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips]
        dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)
        object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0])
        self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))
        self.object_handles = []

        num_object_assets = len(object_assets)
        env_obj_ids = []
        for i in range(self.num_envs):
            # create env instance
            obj_asset_id = i % num_object_assets
            env_obj_ids.append(object_ids[obj_asset_id])
            env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
            if self.aggregate_mode >= 1:
                # compute aggregate size
                obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id])
                obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id])
                max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1
                max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1
                self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)

            self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i)

            # add object
            object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1)
            self.object_handles.append(object_handle)
            self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0])
            object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)
            self.object_indices.append(object_idx)
            self.object_cat_indices.append(object_cat_ids[obj_asset_id])
            # add goal object
            goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2)
            goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)
            self.goal_object_indices.append(goal_object_idx)
            if self.cfg.obj.load_texture:
                self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id])
                self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id])
            else:
                color = np.array([179, 193, 134]) / 255.0
                self.gym.set_rigid_body_color(env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color))
                self.gym.set_rigid_body_color(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color))
            table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0)
            self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.))
            if self.cfg.rgb_render:
                render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)
                self.render_camera_handles.append(render_camera_handle[0])
            if self.aggregate_mode > 0:
                self.gym.end_aggregate(env_ptr)
            self.envs.append(env_ptr)

        object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)
        self.object_rb_masses = [prop.mass for prop in object_rb_props]
        self.setup_torch_states()
        self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1)
        self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1)

    def parse_obj_dataset(self, dataset):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        split_dataset_name = dataset.split(':')
        if len(split_dataset_name) == 1:
            dataset_path = asset_root.joinpath(dataset, 'train')
        else:
            target_object = split_dataset_name[1]
            dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object)
        logger.warning(f'Dataset path:{dataset_path}')
        urdf_files = get_all_files_with_name(dataset_path, name='model.urdf')
        permute_ids = self.np_random.permutation(np.arange(len(urdf_files)))
        permuted_urdfs = [urdf_files[i] for i in permute_ids]
        object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs])))
        obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)}
        return permuted_urdfs, dataset_path, obj_name_to_id

    def get_object_category(self, urdf_path):
        cat = urdf_path.parents[0].name
        if 'var_' in cat:
            cat = urdf_path.parents[1].name
        return cat

    def load_object_asset(self):
        asset_root = dexenv.LIB_PATH.joinpath('assets')
        object_urdfs = self.object_urdfs
        object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], []
        object_cat_ids = []
        if self.cfg.obj.object_id is not None:
            urdf_to_load = self.object_urdfs[self.cfg.obj.object_id]
            logger.info(f'Loading a single object: {urdf_to_load}')
            obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
            object_assets.append(obj_asset)
            goal_assets.append(goal_asset)
            object_ids.append(self.object_urdfs.index(urdf_to_load))
            object_tex_handles.append(texture_handle)
            object_ptds.append(ptd)
            object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        else:
            if self.cfg.obj.start_id is None:
                start = 0
                end = min(len(object_urdfs), self.cfg.obj.num_objs)
            else:
                start = self.cfg.obj.start_id
                end = min(start + self.cfg.obj.num_objs, len(object_urdfs))
            iters = range(start, end)
            logger.info(f'Loading object IDs from {start} to {end}.')
            for idx in tqdm(iters, desc='Loading Asset'):
                urdf_to_load = object_urdfs[idx]
                obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load)
                object_assets.append(obj_asset)
                goal_assets.append(goal_asset)
                object_ids.append(self.object_urdfs.index(urdf_to_load))
                object_tex_handles.append(texture_handle)
                object_ptds.append(ptd)
                object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)])
        return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids

    def load_an_object(self, asset_root, object_urdf):
        out = []
obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd)
5
2023-10-25 17:22:41+00:00
24k
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/train/pipeline.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then provides the parameters necessary to reset any dead neurons.\n\n Motivation:\n Over the course of training, a subset of autoencoder neurons will have zero activity across\n a large number of datapoints. The authors of *Towards Monosemanticity: Decomposing Language\n Models With Dictionary Learning* found that “resampling” these dead neurons during training\n improves the number of likely-interpretable features (i.e., those in the high density\n cluster) and reduces total loss. This resampling may be compatible with the Lottery Ticket\n Hypothesis and increase the number of chances the network has to find promising feature\n directions.\n\n An interesting nuance around dead neurons involves the ultralow density cluster. They found\n that if we increase the number of training steps then networks will kill off more of these\n ultralow density neurons. This reinforces the use of the high density cluster as a useful\n metric because there can exist neurons that are de facto dead but will not appear to be when\n looking at the number of dead neurons alone.\n\n This approach is designed to seed new features to fit inputs where the current autoencoder\n performs worst. Resetting the encoder norm and bias are crucial to ensuring this resampled\n neuron will only fire weakly for inputs similar to the one used for its reinitialization.\n This was done to minimize interference with the rest of the network.\n\n Warning:\n The optimizer should be reset after applying this function, as the Adam state will be\n incorrect for the modified weights and biases.\n\n Warning:\n This approach is also known to create sudden loss spikes, and resampling too frequently\n causes training to diverge.\n \"\"\"\n\n _activations_seen_since_last_resample: int = 0\n \"\"\"Number of activations since we last resampled.\"\"\"\n\n _collated_neuron_activity: Float[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)]\n \"\"\"Collated neuron activity, over the current data collection window.\"\"\"\n\n _threshold_is_dead_portion_fires: float\n \"\"\"Threshold for determining if a neuron has fired (or is dead).\"\"\"\n\n _max_n_resamples: int\n \"\"\"Maximum number of times that resampling should be performed.\"\"\"\n\n _n_activations_collated_since_last_resample: int = 0\n \"\"\"Number of activations collated since we last resampled.\n\n Number of vectors used to collate neuron activity, over the current collation window.\n \"\"\"\n\n _n_components: int\n \"\"\"Number of components.\"\"\"\n\n _n_times_resampled: int = 0\n \"\"\"Number of times that resampling has been performed.\"\"\"\n\n neuron_activity_window_end: int\n \"\"\"End of the window for collecting neuron activity.\"\"\"\n\n neuron_activity_window_start: int\n \"\"\"Start of the window for collecting neuron activity.\"\"\"\n\n @validate_call\n def __init__(\n self,\n n_learned_features: PositiveInt,\n n_components: NonNegativeInt = 1,\n resample_interval: PositiveInt = 200_000_000,\n max_n_resamples: NonNegativeInt = 4,\n n_activations_activity_collate: PositiveInt = 100_000_000,\n resample_dataset_size: PositiveInt = 819_200,\n threshold_is_dead_portion_fires: Annotated[float, Field(strict=True, ge=0, le=1)] = 0.0,\n ) -> None:\n r\"\"\"Initialize the activation 
resampler.\n\n Defaults to values used in the Anthropic Towards Monosemanticity paper.\n\n Args:\n n_learned_features: Number of learned features\n n_components: Number of components that the SAE is being trained on.\n resample_interval: Interval in number of autoencoder input activation vectors trained\n on, before resampling.\n max_n_resamples: Maximum number of resamples to perform throughout the entire pipeline.\n Set to inf if you want to have no limit.\n n_activations_activity_collate: Number of autoencoder learned activation vectors to\n collate before resampling (the activation resampler will start collecting on vector\n $\\text{resample_interval} - \\text{n_steps_collate}$).\n resample_dataset_size: Number of autoencoder input activations to use for calculating\n the loss, as part of the resampling process to create the reset neuron weights.\n threshold_is_dead_portion_fires: Threshold for determining if a neuron is dead (has\n \"fired\" in less than this portion of the collated sample).\n\n Raises:\n ValueError: If any of the arguments are invalid (e.g. negative integers).\n \"\"\"\n if n_activations_activity_collate > resample_interval:\n error_message = (\n \"Number of steps to collate must be less than or equal to the resample interval.\"\n )\n raise ValueError(error_message)\n\n super().__init__()\n self.neuron_activity_window_end = resample_interval\n self.neuron_activity_window_start = resample_interval - n_activations_activity_collate\n self._max_n_resamples = max_n_resamples\n self._collated_neuron_activity = torch.zeros(\n (n_components, n_learned_features), dtype=torch.int64\n )\n self._resample_dataset_size = resample_dataset_size\n self._threshold_is_dead_portion_fires = threshold_is_dead_portion_fires\n self._n_components = n_components\n\n def _get_dead_neuron_indices(\n self,\n ) -> list[Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)]]:\n \"\"\"Identify the indices of neurons that are dead.\n\n Identifies any neurons that have fired less than the threshold portion of the collated\n sample size.\n\n Example:\n >>> resampler = ActivationResampler(n_learned_features=6, n_components=2)\n >>> resampler._collated_neuron_activity = torch.tensor(\n ... [[1, 1, 0, 0, 1, 1], [1, 1, 1, 1, 1, 0]]\n ... 
)\n >>> resampler._get_dead_neuron_indices()\n [tensor([2, 3]), tensor([5])]\n\n Returns:\n List of dead neuron indices for each component.\n\n Raises:\n ValueError: If no neuron activity has been collated yet.\n \"\"\"\n # Check we have already collated some neuron activity\n if torch.all(self._collated_neuron_activity == 0):\n error_message = \"Cannot get dead neuron indices without neuron activity.\"\n raise ValueError(error_message)\n\n # Find any neurons that fire less than the threshold portion of times\n threshold_is_dead_n_fires: int = int(\n self._n_activations_collated_since_last_resample * self._threshold_is_dead_portion_fires\n )\n\n return [\n torch.where(self._collated_neuron_activity[component_idx] <= threshold_is_dead_n_fires)[\n 0\n ].to(dtype=torch.int64)\n for component_idx in range(self._n_components)\n ]\n\n def compute_loss_and_get_activations(\n self,\n store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> LossInputActivationsTuple:\n \"\"\"Compute the loss on a random subset of inputs.\n\n Motivation:\n Helps find input vectors that have high SAE loss, so that we can resample dead neurons\n in a way that improves performance on these specific input vectors.\n\n Args:\n store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n A tuple of loss per item, and all input activations.\n\n Raises:\n ValueError: If the number of items in the store is less than the number of inputs\n \"\"\"\n with torch.no_grad():\n loss_batches: list[Float[Tensor, Axis.BATCH]] = []\n input_activations_batches: list[\n Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n ] = []\n dataloader = DataLoader(store, batch_size=train_batch_size)\n n_inputs = self._resample_dataset_size\n n_batches_required: int = n_inputs // train_batch_size\n model_device: torch.device = get_model_device(autoencoder)\n\n for batch_idx, batch in enumerate(iter(dataloader)):\n input_activations_batches.append(batch)\n source_activations = batch.to(model_device)\n learned_activations, reconstructed_activations = autoencoder(source_activations)\n loss_batches.append(\n loss_fn.forward(\n source_activations, learned_activations, reconstructed_activations\n )\n )\n if batch_idx >= n_batches_required:\n break\n\n loss_per_item = torch.cat(loss_batches).to(model_device)\n input_activations = torch.cat(input_activations_batches).to(model_device)\n\n # Check we generated enough data\n if len(loss_per_item) < n_inputs:\n error_message = (\n f\"Cannot get {n_inputs} items from the store, \"\n f\"as only {len(loss_per_item)} were available.\"\n )\n raise ValueError(error_message)\n\n return LossInputActivationsTuple(loss_per_item, input_activations)\n\n @staticmethod\n def assign_sampling_probabilities(\n loss: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Assign the sampling probabilities for each input activations vector.\n\n Assign each input vector a probability of being picked that is proportional to the square of\n the autoencoder's loss on that input.\n\n Examples:\n >>> loss = torch.tensor([1.0, 2.0, 3.0])\n >>> ActivationResampler.assign_sampling_probabilities(loss).round(decimals=2)\n tensor([0.0700, 0.2900, 0.6400])\n\n >>> loss = torch.tensor([[1.0, 2], [2, 4], [3, 6]])\n >>> 
ActivationResampler.assign_sampling_probabilities(loss).round(decimals=2)\n tensor([[0.0700, 0.0700],\n [0.2900, 0.2900],\n [0.6400, 0.6400]])\n\n Args:\n loss: Loss per item.\n\n Returns:\n A tensor of probabilities for each item.\n \"\"\"\n square_loss = loss.pow(2)\n return square_loss / square_loss.sum(0)\n\n @staticmethod\n def sample_input(\n probabilities: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)],\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n n_samples: list[int],\n ) -> list[Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]]:\n \"\"\"Sample an input vector based on the provided probabilities.\n\n Example:\n >>> probabilities = torch.tensor([[0.1], [0.2], [0.7]])\n >>> input_activations = torch.tensor([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]])\n >>> _seed = torch.manual_seed(0) # For reproducibility in example\n >>> sampled_input = ActivationResampler.sample_input(\n ... probabilities, input_activations, [2]\n ... )\n >>> sampled_input[0].tolist()\n [[5.0, 6.0], [3.0, 4.0]]\n\n Args:\n probabilities: Probabilities for each input.\n input_activations: Input activation vectors.\n n_samples: Number of samples to take (number of dead neurons).\n\n Returns:\n Sampled input activation vector.\n\n Raises:\n ValueError: If the number of samples is greater than the number of input activations.\n \"\"\"\n sampled_inputs: list[\n Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]\n ] = []\n\n for component_idx, component_n_samples in enumerate(n_samples):\n component_probabilities: Float[Tensor, Axis.BATCH] = get_component_slice_tensor(\n input_tensor=probabilities,\n n_dim_with_component=2,\n component_dim=1,\n component_idx=component_idx,\n )\n\n component_input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)\n ] = get_component_slice_tensor(\n input_tensor=input_activations,\n n_dim_with_component=3,\n component_dim=1,\n component_idx=component_idx,\n )\n\n if component_n_samples > len(component_input_activations):\n exception_message = (\n f\"Cannot sample {component_n_samples} inputs from \"\n f\"{len(component_input_activations)} input activations.\"\n )\n raise ValueError(exception_message)\n\n # Handle the 0 dead neurons case\n if component_n_samples == 0:\n sampled_inputs.append(\n torch.empty(\n (0, component_input_activations.shape[-1]),\n dtype=component_input_activations.dtype,\n device=component_input_activations.device,\n )\n )\n continue\n\n # Handle the 1+ dead neuron case\n component_sample_indices: Int64[Tensor, Axis.LEARNT_FEATURE_IDX] = torch.multinomial(\n component_probabilities, num_samples=component_n_samples\n )\n sampled_inputs.append(component_input_activations[component_sample_indices, :])\n\n return sampled_inputs\n\n @staticmethod\n def renormalize_and_scale(\n sampled_input: Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)],\n neuron_activity: Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE)],\n encoder_weight: Float[Tensor, Axis.names(Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)],\n ) -> Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]:\n \"\"\"Renormalize and scale the resampled dictionary vectors.\n\n Renormalize the input vector to equal the average norm of the encoder weights for alive\n neurons times 0.2.\n\n Example:\n >>> from torch.nn import Parameter\n >>> _seed = torch.manual_seed(0) # For reproducibility in example\n >>> sampled_input = 
torch.tensor([[3.0, 4.0]])\n >>> neuron_activity = torch.tensor([3, 0, 5, 0, 1, 3])\n >>> encoder_weight = Parameter(torch.ones((6, 2)))\n >>> rescaled_input = ActivationResampler.renormalize_and_scale(\n ... sampled_input,\n ... neuron_activity,\n ... encoder_weight\n ... )\n >>> rescaled_input.round(decimals=1)\n tensor([[0.2000, 0.2000]])\n\n Args:\n sampled_input: Tensor of the sampled input activation.\n neuron_activity: Tensor representing the number of times each neuron fired.\n encoder_weight: Tensor of encoder weights.\n\n Returns:\n Rescaled sampled input.\n\n Raises:\n ValueError: If there are no alive neurons.\n \"\"\"\n alive_neuron_mask: Bool[Tensor, \" learned_features\"] = neuron_activity > 0\n\n # Check there is at least one alive neuron\n if not torch.any(alive_neuron_mask):\n error_message = \"No alive neurons found.\"\n raise ValueError(error_message)\n\n # Handle no dead neurons\n n_dead_neurons = len(sampled_input)\n if n_dead_neurons == 0:\n return torch.empty(\n (0, sampled_input.shape[-1]), dtype=sampled_input.dtype, device=sampled_input.device\n )\n\n # Calculate the average norm of the encoder weights for alive neurons.\n detached_encoder_weight = encoder_weight.detach() # Don't track gradients\n alive_encoder_weights: Float[\n Tensor, Axis.names(Axis.ALIVE_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = detached_encoder_weight[alive_neuron_mask, :]\n average_alive_norm: Float[Tensor, Axis.SINGLE_ITEM] = alive_encoder_weights.norm(\n dim=-1\n ).mean()\n\n # Renormalize the input vector to equal the average norm of the encoder weights for alive\n # neurons times 0.2.\n renormalized_input: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = torch.nn.functional.normalize(sampled_input, dim=-1)\n return renormalized_input * (average_alive_norm * 0.2)\n\n def resample_dead_neurons(\n self,\n activation_store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> list[ParameterUpdateResults]:\n \"\"\"Resample dead neurons.\n\n Args:\n activation_store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n For each component that the SAE is being trained on, the indices of dead neurons and the\n updates for the encoder and decoder weights and biases.\n \"\"\"\n parameter_update_results: list[ParameterUpdateResults] = []\n\n with torch.no_grad():\n dead_neuron_indices: list[\n Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)]\n ] = self._get_dead_neuron_indices()\n\n # Compute the loss for the current model on a random subset of inputs and get the\n # activations.\n loss_per_item, input_activations = self.compute_loss_and_get_activations(\n store=activation_store,\n autoencoder=autoencoder,\n loss_fn=loss_fn,\n train_batch_size=train_batch_size,\n )\n\n # Assign each input vector a probability of being picked that is proportional to the\n # square of the autoencoder's loss on that input.\n sample_probabilities: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)\n ] = self.assign_sampling_probabilities(loss_per_item)\n\n # For each dead neuron sample an input according to these probabilities.\n sampled_input: list[\n Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]\n ] = self.sample_input(\n sample_probabilities, input_activations, [len(dead) for dead in dead_neuron_indices]\n )\n\n for component_idx 
in range(self._n_components):\n # Renormalize each input vector to have unit L2 norm and set this to be the\n # dictionary vector for the dead autoencoder neuron.\n renormalized_input: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = torch.nn.functional.normalize(sampled_input[component_idx], dim=-1)\n\n dead_decoder_weight_updates = rearrange(\n renormalized_input, \"dead_neuron input_feature -> input_feature dead_neuron\"\n )\n\n # For the corresponding encoder vector, renormalize the input vector to equal the\n # average norm of the encoder weights for alive neurons times 0.2. Set the\n # corresponding encoder bias element to zero.\n encoder_weight: Float[\n Tensor, Axis.names(Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = get_component_slice_tensor(autoencoder.encoder.weight, 3, 0, component_idx)\n\n rescaled_sampled_input = self.renormalize_and_scale(\n sampled_input=sampled_input[component_idx],\n neuron_activity=self._collated_neuron_activity[component_idx],\n encoder_weight=encoder_weight,\n )\n\n dead_encoder_bias_updates = torch.zeros_like(\n dead_neuron_indices[component_idx],\n dtype=dead_decoder_weight_updates.dtype,\n device=dead_decoder_weight_updates.device,\n )\n\n parameter_update_results.append(\n ParameterUpdateResults(\n dead_neuron_indices=dead_neuron_indices[component_idx],\n dead_encoder_weight_updates=rescaled_sampled_input,\n dead_encoder_bias_updates=dead_encoder_bias_updates,\n dead_decoder_weight_updates=dead_decoder_weight_updates,\n )\n )\n\n return parameter_update_results\n\n def step_resampler(\n self,\n batch_neuron_activity: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)],\n activation_store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> list[ParameterUpdateResults] | None:\n \"\"\"Step the resampler, collating neuron activity and resampling if necessary.\n\n Args:\n batch_neuron_activity: Number of times each neuron fired in the current batch.\n activation_store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n Parameter update results (for each component that the SAE is being trained on) if\n resampling is due. Otherwise None.\n \"\"\"\n # Update the counter\n self._activations_seen_since_last_resample += len(activation_store)\n\n if self._n_times_resampled < self._max_n_resamples:\n # Collate neuron activity, if in the data collection window. 
For example in the\n # Anthropic Towards Monosemanticity paper, the window started collecting at 100m\n # activations and stopped at 200m (and then repeated this again a few times until the\n # max times to resample was hit).\n if self._activations_seen_since_last_resample >= self.neuron_activity_window_start:\n detached_neuron_activity = batch_neuron_activity.detach().cpu()\n self._collated_neuron_activity.add_(detached_neuron_activity)\n self._n_activations_collated_since_last_resample += train_batch_size\n\n # Check if we should resample.\n if self._activations_seen_since_last_resample >= self.neuron_activity_window_end:\n # Get resampled dictionary vectors\n resample_res = self.resample_dead_neurons(\n activation_store=activation_store,\n autoencoder=autoencoder,\n loss_fn=loss_fn,\n train_batch_size=train_batch_size,\n )\n\n # Update counters\n self._activations_seen_since_last_resample = 0\n self._n_activations_collated_since_last_resample = 0\n self._n_times_resampled += 1\n\n # Reset the collated neuron activity\n self._collated_neuron_activity.zero_()\n\n return resample_res\n\n return None\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the activation resampler.\"\"\"\n return (\n f\"ActivationResampler(\"\n f\"n_components={self._n_components}, \"\n f\"neuron_activity_window_start={self.neuron_activity_window_end}, \"\n f\"neuron_activity_window_end={self.neuron_activity_window_end}, \"\n f\"max_resamples={self._max_n_resamples}, \"\n f\"resample_dataset_size={self._resample_dataset_size}, \"\n f\"dead_neuron_threshold={self._threshold_is_dead_portion_fires})\"\n )" }, { "identifier": "ParameterUpdateResults", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ParameterUpdateResults:\n \"\"\"Parameter update results from resampling dead neurons.\"\"\"\n\n dead_neuron_indices: Int64[Tensor, Axis.LEARNT_FEATURE_IDX]\n \"\"\"Dead neuron indices.\"\"\"\n\n dead_encoder_weight_updates: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Dead encoder weight updates.\"\"\"\n\n dead_encoder_bias_updates: Float[Tensor, Axis.DEAD_FEATURE]\n \"\"\"Dead encoder bias updates.\"\"\"\n\n dead_decoder_weight_updates: Float[\n Tensor, Axis.names(Axis.INPUT_OUTPUT_FEATURE, Axis.DEAD_FEATURE)\n ]\n \"\"\"Dead decoder weight updates.\"\"\"" }, { "identifier": "TensorActivationStore", "path": "sparse_autoencoder/activation_store/tensor_store.py", "snippet": "class TensorActivationStore(ActivationStore):\n \"\"\"Tensor Activation Store.\n\n Stores tensors in a (large) tensor of shape (item, neuron). Requires the number of activation\n vectors to be stored to be known in advance. 
Multiprocess safe.\n\n Extends the `torch.utils.data.Dataset` class to provide a list-based activation store, with\n additional :meth:`append` and :meth:`extend` methods (the latter of which is non-blocking).\n\n Examples:\n Create an empty activation dataset:\n\n >>> import torch\n >>> store = TensorActivationStore(max_items=1000, n_neurons=100, n_components=2)\n\n Add a single activation vector to the dataset (for a component):\n\n >>> store.append(torch.randn(100), component_idx=0)\n >>> store.append(torch.randn(100), component_idx=1)\n >>> len(store)\n 1\n\n Add a [batch, neurons] activation tensor to the dataset:\n\n >>> store.empty()\n >>> batch = torch.randn(10, 100)\n >>> store.extend(batch, component_idx=0)\n >>> store.extend(batch, component_idx=1)\n >>> len(store)\n 10\n\n Shuffle the dataset **before passing it to the DataLoader**:\n\n >>> store.shuffle() # Faster than using the DataLoader shuffle argument\n\n Use the dataloader to iterate over the dataset:\n\n >>> loader = torch.utils.data.DataLoader(store, shuffle=False, batch_size=2)\n >>> next_item = next(iter(loader))\n >>> next_item.shape\n torch.Size([2, 2, 100])\n \"\"\"\n\n _data: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)]\n \"\"\"Underlying Tensor Data Store.\"\"\"\n\n _items_stored: list[int]\n \"\"\"Number of items stored.\"\"\"\n\n max_items: int\n \"\"\"Maximum Number of Items to Store.\"\"\"\n\n _n_components: int\n \"\"\"Number of components\"\"\"\n\n @property\n def n_components(self) -> int:\n \"\"\"Number of components.\"\"\"\n return self._n_components\n\n @property\n def current_activations_stored_per_component(self) -> list[int]:\n \"\"\"Number of activations stored per component.\"\"\"\n return self._items_stored\n\n @validate_call(config={\"arbitrary_types_allowed\": True})\n def __init__(\n self,\n max_items: PositiveInt,\n n_neurons: PositiveInt,\n n_components: PositiveInt,\n device: torch.device | None = None,\n ) -> None:\n \"\"\"Initialise the Tensor Activation Store.\n\n Args:\n max_items: Maximum number of items to store per component (individual activation\n vectors).\n n_neurons: Number of neurons in each activation vector.\n n_components: Number of components to store (i.e. number of source models).\n device: Device to store the activation vectors on.\n \"\"\"\n self._n_components = n_components\n self._items_stored = [0] * n_components\n self._max_items = max_items\n self._data = torch.empty((max_items, n_components, n_neurons), device=device)\n\n def __len__(self) -> int:\n \"\"\"Length Dunder Method.\n\n Returns the number of activation vectors per component in the dataset.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10_000_000, n_neurons=100, n_components=1)\n >>> store.append(torch.randn(100), component_idx=0)\n >>> store.append(torch.randn(100), component_idx=0)\n >>> len(store)\n 2\n\n Returns:\n The number of activation vectors in the dataset.\n \"\"\"\n # Min as this is the amount of activations that can be fetched by get_item\n return min(self.current_activations_stored_per_component)\n\n def __sizeof__(self) -> int:\n \"\"\"Sizeof Dunder Method.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=2, n_neurons=100, n_components=1)\n >>> store.__sizeof__() # Pre-allocated tensor of 2x100\n 800\n\n Returns:\n The size of the underlying tensor in bytes.\n \"\"\"\n return self._data.element_size() * self._data.nelement()\n\n def __getitem__(\n self, index: tuple[int, ...] 
| slice | int\n ) -> Float[Tensor, Axis.names(Axis.ANY)]:\n \"\"\"Get Item Dunder Method.\n\n Examples:\n >>> import torch\n >>> store = TensorActivationStore(max_items=2, n_neurons=5, n_components=1)\n >>> store.append(torch.zeros(5), component_idx=0)\n >>> store.append(torch.ones(5), component_idx=0)\n >>> store[1, 0]\n tensor([1., 1., 1., 1., 1.])\n\n Args:\n index: The index of the tensor to fetch.\n\n Returns:\n The activation store item at the given index.\n \"\"\"\n return self._data[index]\n\n def shuffle(self) -> None:\n \"\"\"Shuffle the Data In-Place.\n\n This is much faster than using the shuffle argument on `torch.utils.data.DataLoader`.\n\n Example:\n >>> import torch\n >>> _seed = torch.manual_seed(42)\n >>> store = TensorActivationStore(max_items=10, n_neurons=1, n_components=1)\n >>> store.append(torch.tensor([0.]), component_idx=0)\n >>> store.append(torch.tensor([1.]), component_idx=0)\n >>> store.append(torch.tensor([2.]), component_idx=0)\n >>> store.shuffle()\n >>> [store[i, 0].item() for i in range(3)]\n [0.0, 2.0, 1.0]\n \"\"\"\n # Generate a permutation of the indices for the active data\n perm = torch.randperm(len(self))\n\n # Use this permutation to shuffle the active data in-place\n self._data[: len(self)] = self._data[perm]\n\n def append(self, item: Float[Tensor, Axis.INPUT_OUTPUT_FEATURE], component_idx: int) -> None:\n \"\"\"Add a single item to the store.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.append(torch.zeros(5), component_idx=0)\n >>> store.append(torch.ones(5), component_idx=0)\n >>> store[1, 0]\n tensor([1., 1., 1., 1., 1.])\n\n Args:\n item: The item to append to the dataset.\n component_idx: The component index to append the item to.\n\n Raises:\n IndexError: If there is no space remaining.\n \"\"\"\n # Check we have space\n if self._items_stored[component_idx] + 1 > self._max_items:\n raise StoreFullError\n\n self._data[self._items_stored[component_idx], component_idx] = item.to(\n self._data.device,\n )\n self._items_stored[component_idx] += 1\n\n def extend(\n self,\n batch: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)],\n component_idx: int,\n ) -> None:\n \"\"\"Add a batch to the store.\n\n Examples:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.extend(torch.zeros(2, 5), component_idx=0)\n >>> len(store)\n 2\n\n Args:\n batch: The batch to append to the dataset.\n component_idx: The component index to append the batch to.\n\n Raises:\n IndexError: If there is no space remaining.\n \"\"\"\n # Check we have space\n n_activation_tensors: int = batch.shape[0]\n if self._items_stored[component_idx] + n_activation_tensors > self._max_items:\n raise StoreFullError\n\n self._data[\n self._items_stored[component_idx] : self._items_stored[component_idx]\n + n_activation_tensors,\n component_idx,\n ] = batch.to(self._data.device)\n self._items_stored[component_idx] += n_activation_tensors\n\n def empty(self) -> None:\n \"\"\"Empty the store.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.extend(torch.zeros(2, 5), component_idx=0)\n >>> len(store)\n 2\n >>> store.empty()\n >>> len(store)\n 0\n \"\"\"\n # We don't need to zero the data, just reset the number of items stored\n self._items_stored = [0 for _ in self._items_stored]" }, { "identifier": "SparseAutoencoder", "path": "sparse_autoencoder/autoencoder/model.py", 
"snippet": "class SparseAutoencoder(Module):\n \"\"\"Sparse Autoencoder Model.\"\"\"\n\n config: SparseAutoencoderConfig\n \"\"\"Model config.\"\"\"\n\n geometric_median_dataset: Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Estimated Geometric Median of the Dataset.\n\n Used for initialising :attr:`tied_bias`.\n \"\"\"\n\n tied_bias: Float[\n Parameter, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Tied Bias Parameter.\n\n The same bias is used pre-encoder and post-decoder.\n \"\"\"\n\n pre_encoder_bias: TiedBias\n \"\"\"Pre-Encoder Bias.\"\"\"\n\n encoder: LinearEncoder\n \"\"\"Encoder.\"\"\"\n\n decoder: UnitNormDecoder\n \"\"\"Decoder.\"\"\"\n\n post_decoder_bias: TiedBias\n \"\"\"Post-Decoder Bias.\"\"\"\n\n def __init__(\n self,\n config: SparseAutoencoderConfig,\n geometric_median_dataset: Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n | None = None,\n ) -> None:\n \"\"\"Initialize the Sparse Autoencoder Model.\n\n Args:\n config: Model config.\n geometric_median_dataset: Estimated geometric median of the dataset.\n \"\"\"\n super().__init__()\n\n self.config = config\n\n # Store the geometric median of the dataset (so that we can reset parameters). This is not a\n # parameter itself (the tied bias parameter is used for that), so gradients are disabled.\n tied_bias_shape = shape_with_optional_dimensions(\n config.n_components, config.n_input_features\n )\n if geometric_median_dataset is not None:\n self.geometric_median_dataset = geometric_median_dataset.clone()\n self.geometric_median_dataset.requires_grad = False\n else:\n self.geometric_median_dataset = torch.zeros(tied_bias_shape)\n self.geometric_median_dataset.requires_grad = False\n\n # Initialize the tied bias\n self.tied_bias = Parameter(torch.empty(tied_bias_shape))\n self.initialize_tied_parameters()\n\n # Initialize the components\n self.pre_encoder_bias = TiedBias(self.tied_bias, TiedBiasPosition.PRE_ENCODER)\n\n self.encoder = LinearEncoder(\n input_features=config.n_input_features,\n learnt_features=config.n_learned_features,\n n_components=config.n_components,\n )\n\n self.decoder = UnitNormDecoder(\n learnt_features=config.n_learned_features,\n decoded_features=config.n_input_features,\n n_components=config.n_components,\n )\n\n self.post_decoder_bias = TiedBias(self.tied_bias, TiedBiasPosition.POST_DECODER)\n\n def forward(\n self,\n x: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> ForwardPassResult:\n \"\"\"Forward Pass.\n\n Args:\n x: Input activations (e.g. 
activations from an MLP layer in a transformer model).\n\n Returns:\n Tuple of learned activations and decoded activations.\n \"\"\"\n x = self.pre_encoder_bias(x)\n learned_activations = self.encoder(x)\n x = self.decoder(learned_activations)\n decoded_activations = self.post_decoder_bias(x)\n\n return ForwardPassResult(learned_activations, decoded_activations)\n\n def initialize_tied_parameters(self) -> None:\n \"\"\"Initialize the tied parameters.\"\"\"\n # The tied bias is initialised as the geometric median of the dataset\n self.tied_bias.data = self.geometric_median_dataset\n\n def reset_parameters(self) -> None:\n \"\"\"Reset the parameters.\"\"\"\n self.initialize_tied_parameters()\n for module in self.network:\n if \"reset_parameters\" in dir(module):\n module.reset_parameters()\n\n @property\n def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:\n \"\"\"Reset optimizer parameter details.\n\n Details of the parameters that should be reset in the optimizer, when resetting\n dictionary vectors.\n\n Returns:\n List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to\n reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.\n \"\"\"\n return (\n self.encoder.reset_optimizer_parameter_details\n + self.decoder.reset_optimizer_parameter_details\n )\n\n def post_backwards_hook(self) -> None:\n \"\"\"Hook to be called after each learning step.\n\n This can be used to e.g. constrain weights to unit norm.\n \"\"\"\n self.decoder.constrain_weights_unit_norm()\n\n @staticmethod\n @validate_call\n def get_single_component_state_dict(\n state: SparseAutoencoderState, component_idx: NonNegativeInt\n ) -> dict[str, Tensor]:\n \"\"\"Get the state dict for a single component.\n\n Args:\n state: Sparse Autoencoder state.\n component_idx: Index of the component to get the state dict for.\n\n Returns:\n State dict for the component.\n\n Raises:\n ValueError: If the state dict doesn't contain a components dimension.\n \"\"\"\n # Check the state has a components dimension\n if state.config.n_components is None:\n error_message = (\n \"Trying to load a single component from the state dict, but the state dict \"\n \"doesn't contain a components dimension.\"\n )\n raise ValueError(error_message)\n\n # Return the state dict for the component\n return {key: value[component_idx] for key, value in state.state_dict.items()}\n\n def save(self, file_path: Path) -> None:\n \"\"\"Save the model config and state dict to a file.\n\n Args:\n file_path: Path to save the model to.\n \"\"\"\n file_path.parent.mkdir(parents=True, exist_ok=True)\n state = SparseAutoencoderState(config=self.config, state_dict=self.state_dict())\n torch.save(state, file_path)\n\n @staticmethod\n def load(\n file_path: FILE_LIKE,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from a file.\n\n Args:\n file_path: Path to load the model from.\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. 
Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n # Load the file\n serialized_state = torch.load(file_path, map_location=torch.device(\"cpu\"))\n state = SparseAutoencoderState.model_validate(serialized_state)\n\n # Initialise the model\n config = SparseAutoencoderConfig(\n n_input_features=state.config.n_input_features,\n n_learned_features=state.config.n_learned_features,\n n_components=state.config.n_components if component_idx is None else None,\n )\n state_dict = (\n SparseAutoencoder.get_single_component_state_dict(state, component_idx)\n if component_idx is not None\n else state.state_dict\n )\n model = SparseAutoencoder(config)\n model.load_state_dict(state_dict)\n\n return model\n\n def save_to_wandb(\n self,\n artifact_name: str,\n directory: DirectoryPath = DEFAULT_TMP_DIR,\n ) -> str:\n \"\"\"Save the model to wandb.\n\n Args:\n artifact_name: A human-readable name for this artifact, which is how you can identify\n this artifact in the UI or reference it in use_artifact calls. Names can contain\n letters, numbers, underscores, hyphens, and dots. The name must be unique across a\n project. Example: \"sweep_name 1e9 activations\".\n directory: Directory to save the model to.\n\n Returns:\n Name of the wandb artifact.\n\n Raises:\n ValueError: If wandb is not initialised.\n \"\"\"\n # Save the file\n directory.mkdir(parents=True, exist_ok=True)\n file_name = artifact_name + \".pt\"\n file_path = directory / file_name\n self.save(file_path)\n\n # Upload to wandb\n if wandb.run is None:\n error_message = \"Trying to save the model to wandb, but wandb is not initialised.\"\n raise ValueError(error_message)\n artifact = wandb.Artifact(\n artifact_name,\n type=\"model\",\n description=\"Sparse Autoencoder model state, created with `sparse_autoencoder`.\",\n )\n artifact.add_file(str(file_path), name=\"sae-model-state.pt\")\n artifact.save()\n wandb.log_artifact(artifact)\n artifact.wait()\n\n return artifact.source_qualified_name\n\n @staticmethod\n def load_from_wandb(\n wandb_artifact_name: str,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from wandb.\n\n Args:\n wandb_artifact_name: Name of the wandb artifact to load the model from (e.g.\n \"username/project/artifact_name:version\").\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n api = wandb.Api()\n artifact = api.artifact(wandb_artifact_name, type=\"model\")\n download_path = artifact.download()\n return SparseAutoencoder.load(Path(download_path) / \"sae-model-state.pt\", component_idx)\n\n def save_to_hugging_face(\n self,\n file_name: str,\n repo_id: str,\n directory: DirectoryPath = DEFAULT_TMP_DIR,\n hf_access_token: str | None = None,\n ) -> None:\n \"\"\"Save the model to Hugging Face.\n\n Args:\n file_name: Name of the file (e.g. 
\"model-something.pt\").\n repo_id: ID of the repo to save the model to.\n directory: Directory to save the model to.\n hf_access_token: Hugging Face access token.\n \"\"\"\n # Save the file\n directory.mkdir(parents=True, exist_ok=True)\n file_path = directory / file_name\n self.save(file_path)\n\n # Upload to Hugging Face\n api = HfApi(token=hf_access_token)\n api.upload_file(\n path_or_fileobj=file_path,\n path_in_repo=file_name,\n repo_id=repo_id,\n repo_type=\"model\",\n )\n\n @staticmethod\n def load_from_hugging_face(\n file_name: str,\n repo_id: str,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from Hugging Face.\n\n Args:\n file_name: File name of the .pt state file.\n repo_id: ID of the repo to load the model from.\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n local_file = hf_hub_download(\n repo_id=repo_id,\n repo_type=\"model\",\n filename=file_name,\n revision=\"main\",\n )\n\n return SparseAutoencoder.load(Path(local_file), component_idx)" }, { "identifier": "AbstractLoss", "path": "sparse_autoencoder/loss/abstract_loss.py", "snippet": "class AbstractLoss(Module, ABC):\n \"\"\"Abstract loss interface.\n\n Interface for implementing batch itemwise loss functions.\n \"\"\"\n\n _modules: dict[str, \"AbstractLoss\"] # type: ignore[assignment] (narrowing)\n \"\"\"Children loss modules.\"\"\"\n\n @abstractmethod\n def log_name(self) -> str:\n \"\"\"Log name.\n\n Returns:\n Name of the loss module for logging.\n \"\"\"\n\n @abstractmethod\n def forward(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Batch itemwise loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n\n Returns:\n Loss per batch item.\n \"\"\"\n\n @final\n def batch_loss(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n batch_reduction: LossReductionType = LossReductionType.MEAN,\n ) -> Float[Tensor, Axis.COMPONENT_OPTIONAL]:\n \"\"\"Batch loss (reduced across the batch axis).\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n batch_reduction: Loss reduction type. 
Typically you would choose LossReductionType.MEAN\n to make the loss independent of the batch size.\n\n Returns:\n Loss for the batch.\n\n Raises:\n ValueError: If the batch reduction type is NONE.\n \"\"\"\n itemwise_loss = self.forward(source_activations, learned_activations, decoded_activations)\n\n # Reduction parameter is over the batch dimension (not the component dimension)\n match batch_reduction:\n case LossReductionType.MEAN:\n return itemwise_loss.mean(dim=0)\n case LossReductionType.SUM:\n return itemwise_loss.sum(dim=0)\n case LossReductionType.NONE:\n error_message = \"Batch reduction type NONE not supported.\"\n raise ValueError(error_message)\n\n def scalar_loss_with_log(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n batch_reduction: LossReductionType = LossReductionType.MEAN,\n component_reduction: LossReductionType = LossReductionType.NONE,\n ) -> LossResultWithMetrics:\n \"\"\"Scalar loss (reduced across the batch and component axis) with logging.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n batch_reduction: Batch reduction type. Typically you would choose LossReductionType.MEAN\n to make the loss independent of the batch size.\n component_reduction: Component reduction type.\n\n Returns:\n Tuple of the batch scalar loss and a dict of any properties to log.\n \"\"\"\n children_loss_scalars: list[\n Float[Tensor, Axis.COMPONENT] | Float[Tensor, Axis.SINGLE_ITEM]\n ] = []\n metrics: list[MetricResult] = []\n\n # If the loss module has children (e.g. 
it is a reducer):\n if len(self._modules) > 0:\n for loss_module in self._modules.values():\n child_loss, child_metrics = loss_module.scalar_loss_with_log(\n source_activations,\n learned_activations,\n decoded_activations,\n batch_reduction=batch_reduction,\n # Note we don't pass through component reduction, as that would prevent logging\n # component-wise losses in reducers.\n )\n children_loss_scalars.append(child_loss)\n metrics.extend(child_metrics)\n\n # Get the total loss & metric\n current_module_loss = torch.stack(children_loss_scalars).sum(0)\n\n # Otherwise if it is a leaf loss module:\n else:\n current_module_loss = self.batch_loss(\n source_activations, learned_activations, decoded_activations, batch_reduction\n )\n # Add in the current loss module's metric\n log = MetricResult(\n location=MetricLocation.TRAIN,\n name=\"loss\",\n postfix=self.log_name(),\n component_wise_values=current_module_loss.unsqueeze(0)\n if current_module_loss.ndim == 0\n else current_module_loss,\n )\n metrics.append(log)\n\n # Reduce the current module loss across the component dimension\n match component_reduction:\n case LossReductionType.MEAN:\n current_module_loss = current_module_loss.mean(0)\n case LossReductionType.SUM:\n current_module_loss = current_module_loss.sum(0)\n case LossReductionType.NONE:\n pass\n\n return LossResultWithMetrics(loss=current_module_loss, loss_metrics=metrics)\n\n @final\n def __call__(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n reduction: LossReductionType = LossReductionType.MEAN,\n ) -> LossResultWithMetrics:\n \"\"\"Batch scalar loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n reduction: Loss reduction type. 
Typically you would choose LossReductionType.MEAN to\n make the loss independent of the batch size.\n\n Returns:\n Tuple of the batch scalar loss and a dict of any properties to log.\n \"\"\"\n return self.scalar_loss_with_log(\n source_activations, learned_activations, decoded_activations, reduction\n )" }, { "identifier": "LossReductionType", "path": "sparse_autoencoder/loss/abstract_loss.py", "snippet": "class LossReductionType(LowercaseStrEnum):\n \"\"\"Loss reduction type.\"\"\"\n\n MEAN = \"mean\"\n\n SUM = \"sum\"\n\n NONE = \"none\"" }, { "identifier": "MetricsContainer", "path": "sparse_autoencoder/metrics/metrics_container.py", "snippet": "class MetricsContainer:" }, { "identifier": "TrainMetricData", "path": "sparse_autoencoder/metrics/train/abstract_train_metric.py", "snippet": "class TrainMetricData:\n \"\"\"Train metric data.\"\"\"\n\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Input activations.\"\"\"\n\n learned_activations: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT, Axis.LEARNT_FEATURE)]\n \"\"\"Learned activations.\"\"\"\n\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Decoded activations.\"\"\"\n\n def __init__(\n self,\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> None:\n \"\"\"Initialize the train metric data.\"\"\"\n self.input_activations = add_component_axis_if_missing(\n input_activations, dimensions_without_component=2\n ).detach()\n self.learned_activations = add_component_axis_if_missing(\n learned_activations, dimensions_without_component=2\n ).detach()\n self.decoded_activations = add_component_axis_if_missing(\n decoded_activations, dimensions_without_component=2\n ).detach()" }, { "identifier": "ValidationMetricData", "path": "sparse_autoencoder/metrics/validate/abstract_validate_metric.py", "snippet": "class ValidationMetricData:\n \"\"\"Validation metric data.\n\n Dataclass that always has a component axis.\n \"\"\"\n\n source_model_loss: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT)]\n \"\"\"Source model loss (without the SAE).\"\"\"\n\n source_model_loss_with_reconstruction: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT)]\n \"\"\"Source model loss with SAE reconstruction.\"\"\"\n\n source_model_loss_with_zero_ablation: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT)]\n \"\"\"Source model loss with zero ablation.\"\"\"\n\n def __init__(\n self,\n source_model_loss: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT_OPTIONAL)],\n source_model_loss_with_reconstruction: Float[\n Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT_OPTIONAL)\n ],\n source_model_loss_with_zero_ablation: Float[\n Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT_OPTIONAL)\n ],\n ) -> None:\n \"\"\"Initialize the validation metric data.\"\"\"\n self.source_model_loss = add_component_axis_if_missing(source_model_loss).detach()\n self.source_model_loss_with_reconstruction = add_component_axis_if_missing(\n source_model_loss_with_reconstruction\n ).detach()\n self.source_model_loss_with_zero_ablation = add_component_axis_if_missing(\n source_model_loss_with_zero_ablation\n ).detach()" }, { "identifier": 
"AbstractOptimizerWithReset", "path": "sparse_autoencoder/optimizer/abstract_optimizer.py", "snippet": "class AbstractOptimizerWithReset(Optimizer, ABC):\n \"\"\"Abstract optimizer with reset.\n\n When implementing this interface, we recommend adding a `named_parameters` argument to the\n constructor, which can be obtained from `named_parameters=model.named_parameters()` by the end\n user. This is so that the optimizer can find the parameters to reset.\n \"\"\"\n\n @abstractmethod\n def reset_state_all_parameters(self) -> None:\n \"\"\"Reset the state for all parameters.\n\n Resets any optimizer state (e.g. momentum). This is for use after manually editing model\n parameters (e.g. with activation resampling).\n \"\"\"\n\n @abstractmethod\n def reset_neurons_state(\n self,\n parameter: Parameter,\n neuron_indices: Int64[Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE_IDX)],\n axis: int,\n component_idx: int,\n ) -> None:\n \"\"\"Reset the state for specific neurons, on a specific parameter.\n\n Args:\n parameter: The parameter to reset, e.g. `encoder.Linear.weight`, `encoder.Linear.bias`,\n neuron_indices: The indices of the neurons to reset.\n axis: The axis of the parameter to reset.\n component_idx: The component index of the state values to reset.\n\n Raises:\n ValueError: If the parameter name is not found.\n \"\"\"" }, { "identifier": "SourceDataset", "path": "sparse_autoencoder/source_data/abstract_dataset.py", "snippet": "class SourceDataset(ABC, Generic[HuggingFaceDatasetItem]):\n \"\"\"Abstract source dataset.\n\n Source dataset that is used to generate the activations dataset (by running forward passes of\n the source model with this data). It should contain prompts that have been tokenized with no\n padding tokens (apart from an optional single first padding token). This enables efficient\n generation of the activations dataset.\n\n Wraps an HuggingFace IterableDataset.\n \"\"\"\n\n context_size: int\n \"\"\"Number of tokens in the context window.\n\n The paper *Towards Monosemanticity: Decomposing Language Models With Dictionary Learning* used\n a context size of 250.\n \"\"\"\n\n dataset: Dataset | IterableDataset\n \"\"\"Underlying HuggingFace Dataset.\n\n Warning:\n Hugging Face `Dataset` objects are confusingly not the same as PyTorch `Dataset` objects.\n \"\"\"\n\n _dataset_column_name: str\n \"\"\"Dataset column name for the prompts.\"\"\"\n\n @abstractmethod\n def preprocess(\n self,\n source_batch: HuggingFaceDatasetItem,\n *,\n context_size: int,\n ) -> TokenizedPrompts:\n \"\"\"Preprocess function.\n\n Takes a `preprocess_batch_size` ($m$) batch of source data (which may e.g. include string\n prompts), and returns a dict with a single key of `input_ids` and a value of an arbitrary\n length list ($n$) of tokenized prompts. Note that $m$ does not have to be equal to $n$.\n\n Applied to the dataset with the [Hugging Face\n Dataset](https://huggingface.co/docs/datasets/v2.14.5/en/package_reference/main_classes#datasets.Dataset.map)\n `map` function.\n\n Warning:\n The returned tokenized prompts should not have any padding tokens (apart from an\n optional single first padding token).\n\n Args:\n source_batch: A batch of source data. 
For example, with The Pile dataset this would be a\n dict including the key \"text\" with a value of a list of strings (not yet tokenized).\n context_size: The context size to use when returning a list of tokenized prompts.\n *Towards Monosemanticity: Decomposing Language Models With Dictionary Learning* used\n a context size of 250.\n\n Returns:\n Tokenized prompts.\n \"\"\"\n\n @abstractmethod\n @validate_call\n def __init__(\n self,\n dataset_path: str,\n dataset_split: str,\n context_size: PositiveInt,\n buffer_size: PositiveInt = 1000,\n dataset_dir: str | None = None,\n dataset_files: str | Sequence[str] | Mapping[str, str | Sequence[str]] | None = None,\n dataset_column_name: str = \"input_ids\",\n n_processes_preprocessing: PositiveInt | None = None,\n preprocess_batch_size: PositiveInt = 1000,\n *,\n pre_download: bool = False,\n ):\n \"\"\"Initialise the dataset.\n\n Loads the dataset with streaming from HuggingFace, dds preprocessing and shuffling to the\n underlying Hugging Face `IterableDataset`.\n\n Args:\n dataset_path: The path to the dataset on Hugging Face.\n dataset_split: Dataset split (e.g. `train`).\n context_size: The context size to use when returning a list of tokenized prompts.\n *Towards Monosemanticity: Decomposing Language Models With Dictionary Learning* used\n a context size of 250.\n buffer_size: The buffer size to use when shuffling the dataset when streaming. When\n streaming a dataset, this just pre-downloads at least `buffer_size` items and then\n shuffles just that buffer. Note that the generated activations should also be\n shuffled before training the sparse autoencoder, so a large buffer may not be\n strictly necessary here. Note also that this is the number of items in the dataset\n (e.g. number of prompts) and is typically significantly less than the number of\n tokenized prompts once the preprocessing function has been applied.\n dataset_dir: Defining the `data_dir` of the dataset configuration.\n dataset_files: Path(s) to source data file(s).\n dataset_column_name: The column name for the prompts.\n n_processes_preprocessing: The number of processes to use for preprocessing.\n preprocess_batch_size: The batch size to use just for preprocessing the dataset (e.g.\n tokenizing prompts).\n pre_download: Whether to pre-download the whole dataset.\n\n Raises:\n TypeError: If the loaded dataset is not a Hugging Face `Dataset` or `IterableDataset`.\n \"\"\"\n self.context_size = context_size\n self._dataset_column_name = dataset_column_name\n\n # Load the dataset\n should_stream = not pre_download\n dataset = load_dataset(\n dataset_path,\n streaming=should_stream,\n split=dataset_split,\n data_dir=dataset_dir,\n data_files=dataset_files,\n verification_mode=VerificationMode.NO_CHECKS, # As it fails when data_files is set\n )\n\n # Setup preprocessing (we remove all columns except for input ids)\n remove_columns: list[str] = list(next(iter(dataset)).keys())\n if \"input_ids\" in remove_columns:\n remove_columns.remove(\"input_ids\")\n\n if pre_download:\n if not isinstance(dataset, Dataset):\n error_message = (\n f\"Expected Hugging Face dataset to be a Dataset when pre-downloading, but got \"\n f\"{type(dataset)}.\"\n )\n raise TypeError(error_message)\n\n # Download the whole dataset\n mapped_dataset = dataset.map(\n self.preprocess,\n batched=True,\n batch_size=preprocess_batch_size,\n fn_kwargs={\"context_size\": context_size},\n remove_columns=remove_columns,\n num_proc=n_processes_preprocessing,\n )\n self.dataset = mapped_dataset.shuffle()\n 
else:\n # Setup approximate shuffling. As the dataset is streamed, this just pre-downloads at\n # least `buffer_size` items and then shuffles just that buffer.\n # https://huggingface.co/docs/datasets/v2.14.5/stream#shuffle\n if not isinstance(dataset, IterableDataset):\n error_message = (\n f\"Expected Hugging Face dataset to be an IterableDataset when streaming, but \"\n f\"got {type(dataset)}.\"\n )\n raise TypeError(error_message)\n\n mapped_dataset = dataset.map(\n self.preprocess,\n batched=True,\n batch_size=preprocess_batch_size,\n fn_kwargs={\"context_size\": context_size},\n remove_columns=remove_columns,\n )\n self.dataset = mapped_dataset.shuffle(buffer_size=buffer_size) # type: ignore\n\n @final\n def __iter__(self) -> Any: # noqa: ANN401\n \"\"\"Iterate Dunder Method.\n\n Enables direct access to :attr:`dataset` with e.g. `for` loops.\n \"\"\"\n return self.dataset.__iter__()\n\n @final\n def get_dataloader(\n self, batch_size: int, num_workers: NonNegativeInt = 0\n ) -> DataLoader[TorchTokenizedPrompts]:\n \"\"\"Get a PyTorch DataLoader.\n\n Args:\n batch_size: The batch size to use.\n num_workers: Number of CPU workers.\n\n Returns:\n PyTorch DataLoader.\n \"\"\"\n torch_dataset: TorchDataset[TorchTokenizedPrompts] = self.dataset.with_format(\"torch\") # type: ignore\n\n return DataLoader[TorchTokenizedPrompts](\n torch_dataset,\n batch_size=batch_size,\n # Shuffle is most efficiently done with the `shuffle` method on the dataset itself, not\n # here.\n shuffle=False,\n num_workers=num_workers,\n )" }, { "identifier": "TorchTokenizedPrompts", "path": "sparse_autoencoder/source_data/abstract_dataset.py", "snippet": "class TorchTokenizedPrompts(TypedDict):\n \"\"\"Tokenized prompts prepared for PyTorch.\"\"\"\n\n input_ids: Int[Tensor, Axis.names(Axis.SOURCE_DATA_BATCH, Axis.POSITION)]" }, { "identifier": "replace_activations_hook", "path": "sparse_autoencoder/source_model/replace_activations_hook.py", "snippet": "def replace_activations_hook(\n value: Tensor,\n hook: HookPoint, # noqa: ARG001\n sparse_autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n component_idx: int | None = None,\n n_components: int | None = None,\n) -> Tensor:\n \"\"\"Replace activations hook.\n\n This should be pre-initialised with `functools.partial`.\n\n Args:\n value: The activations to replace.\n hook: The hook point.\n sparse_autoencoder: The sparse autoencoder.\n component_idx: The component index to replace the activations with, if just replacing\n activations for a single component. Requires the model to have a component axis.\n n_components: The number of components that the SAE is trained on.\n\n Returns:\n Replaced activations.\n\n Raises:\n RuntimeError: If `component_idx` is specified, but the model does not have a component\n \"\"\"\n # Squash to just have a \"*items\" and a \"batch\" dimension\n original_shape = value.shape\n\n squashed_value: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)] = value.view(\n -1, value.size(-1)\n )\n\n if component_idx is not None:\n if n_components is None:\n error_message = \"The number of model components must be set if component_idx is set.\"\n raise RuntimeError(error_message)\n\n # The approach here is to run a forward pass with dummy values for all components other than\n # the one we want to replace. This is done by expanding the inputs to the SAE for a specific\n # component across all components. 
We then simply discard the activations for all other\n # components.\n expanded_shape = [\n squashed_value.shape[0],\n n_components,\n squashed_value.shape[-1],\n ]\n expanded = squashed_value.unsqueeze(1).expand(*expanded_shape)\n\n _learned_activations, output_activations = sparse_autoencoder.forward(expanded)\n component_output_activations = output_activations[:, component_idx]\n\n return component_output_activations.view(*original_shape)\n\n # Get the output activations from a forward pass of the SAE\n _learned_activations, output_activations = sparse_autoencoder.forward(squashed_value)\n\n # Reshape to the original shape\n return output_activations.view(*original_shape)" }, { "identifier": "store_activations_hook", "path": "sparse_autoencoder/source_model/store_activations_hook.py", "snippet": "def store_activations_hook(\n value: Float[Tensor, Axis.names(Axis.ANY)],\n hook: HookPoint, # noqa: ARG001\n store: ActivationStore,\n reshape_method: ReshapeActivationsFunction = reshape_to_last_dimension,\n component_idx: int = 0,\n) -> Float[Tensor, Axis.names(Axis.ANY)]:\n \"\"\"Store Activations Hook.\n\n Useful for getting just the specific activations wanted, rather than the full cache.\n\n Example:\n First we'll need a source model from TransformerLens and an activation store.\n\n >>> from functools import partial\n >>> from transformer_lens import HookedTransformer\n >>> from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore\n >>> store = TensorActivationStore(max_items=1000, n_neurons=64, n_components=1)\n >>> model = HookedTransformer.from_pretrained(\"tiny-stories-1M\")\n Loaded pretrained model tiny-stories-1M into HookedTransformer\n\n Next we can add the hook to specific neurons (in this case the first MLP neurons), and\n create the tokens for a forward pass.\n\n >>> model.add_hook(\n ... \"blocks.0.hook_mlp_out\", partial(store_activations_hook, store=store)\n ... )\n >>> tokens = model.to_tokens(\"Hello world\")\n >>> tokens.shape\n torch.Size([1, 3])\n\n Then when we run the model, we should get one activation vector for each token (as we just\n have one batch item). Note we also set `stop_at_layer=1` as we don't need the logits or any\n other activations after the hook point that we've specified (in this case the first MLP\n layer).\n\n >>> _output = model.forward(\"Hello world\", stop_at_layer=1) # Change this layer as required\n >>> len(store)\n 3\n\n Args:\n value: The activations to store.\n hook: The hook point.\n store: The activation store. 
This should be pre-initialised with `functools.partial`.\n reshape_method: The method to reshape the activations before storing them.\n component_idx: The component index of the activations to store.\n\n Returns:\n Unmodified activations.\n \"\"\"\n reshaped: Float[\n Tensor, Axis.names(Axis.STORE_BATCH, Axis.INPUT_OUTPUT_FEATURE)\n ] = reshape_method(value)\n\n store.extend(reshaped, component_idx=component_idx)\n\n # Return the unmodified value\n return value" }, { "identifier": "zero_ablate_hook", "path": "sparse_autoencoder/source_model/zero_ablate_hook.py", "snippet": "def zero_ablate_hook(\n value: Tensor,\n hook: HookPoint, # noqa: ARG001\n) -> Tensor:\n \"\"\"Zero ablate hook.\n\n Args:\n value: The activations to store.\n hook: The hook point.\n\n Example:\n >>> dummy_hook_point = HookPoint()\n >>> value = torch.ones(2, 3)\n >>> zero_ablate_hook(value, dummy_hook_point)\n tensor([[0., 0., 0.],\n [0., 0., 0.]])\n\n Returns:\n Replaced activations.\n \"\"\"\n return torch.zeros_like(value)" }, { "identifier": "Axis", "path": "sparse_autoencoder/tensor_types.py", "snippet": "class Axis(LowercaseStrEnum):\n \"\"\"Tensor axis names.\n\n Used to annotate tensor types.\n\n Example:\n When used directly it prints a string:\n\n >>> print(Axis.INPUT_OUTPUT_FEATURE)\n input_output_feature\n\n The primary use is to annotate tensor types:\n\n >>> from jaxtyping import Float\n >>> from torch import Tensor\n >>> from typing import TypeAlias\n >>> batch: TypeAlias = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n >>> print(batch)\n <class 'jaxtyping.Float[Tensor, 'batch input_output_feature']'>\n\n You can also join multiple axis together to represent the dimensions of a tensor:\n\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n \"\"\"\n\n # Component idx\n COMPONENT = auto()\n \"\"\"Component index.\"\"\"\n\n COMPONENT_OPTIONAL = \"*component\"\n \"\"\"Optional component index.\"\"\"\n\n # Batches\n SOURCE_DATA_BATCH = auto()\n \"\"\"Batch of prompts used to generate source model activations.\"\"\"\n\n BATCH = auto()\n \"\"\"Batch of items that the SAE is being trained on.\"\"\"\n\n STORE_BATCH = auto()\n \"\"\"Batch of items to be written to the store.\"\"\"\n\n ITEMS = auto()\n \"\"\"Arbitrary number of items.\"\"\"\n\n # Features\n INPUT_OUTPUT_FEATURE = auto()\n \"\"\"Input or output feature (e.g. feature in activation vector from source model).\"\"\"\n\n LEARNT_FEATURE = auto()\n \"\"\"Learn feature (e.g. 
feature in learnt activation vector).\"\"\"\n\n DEAD_FEATURE = auto()\n \"\"\"Dead feature.\"\"\"\n\n ALIVE_FEATURE = auto()\n \"\"\"Alive feature.\"\"\"\n\n # Feature indices\n INPUT_OUTPUT_FEATURE_IDX = auto()\n \"\"\"Input or output feature index.\"\"\"\n\n LEARNT_FEATURE_IDX = auto()\n \"\"\"Learn feature index.\"\"\"\n\n # Other\n POSITION = auto()\n \"\"\"Token position.\"\"\"\n\n SINGLE_ITEM = \"\"\n \"\"\"Single item axis.\"\"\"\n\n ANY = \"...\"\n \"\"\"Any number of axis.\"\"\"\n\n @staticmethod\n def names(*axis: \"Axis\") -> str:\n \"\"\"Join multiple axis together, to represent the dimensions of a tensor.\n\n Example:\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n\n Args:\n *axis: Axis to join.\n\n Returns:\n Joined axis string.\n \"\"\"\n return \" \".join(a.value for a in axis)" }, { "identifier": "get_model_device", "path": "sparse_autoencoder/train/utils/get_model_device.py", "snippet": "def get_model_device(model: Module | DataParallel | DeepSpeedEngine) -> torch.device:\n \"\"\"Get the device on which a PyTorch model is on.\n\n Args:\n model: The PyTorch model.\n\n Returns:\n The device ('cuda' or 'cpu') where the model is located.\n\n Raises:\n ValueError: If the model has no parameters.\n \"\"\"\n # Deepspeed models already have a device property, so just return that\n if hasattr(model, \"device\"):\n return model.device\n\n # Check if the model has parameters\n if len(list(model.parameters())) == 0:\n exception_message = \"The model has no parameters.\"\n raise ValueError(exception_message)\n\n # Return the device of the first parameter\n return next(model.parameters()).device" } ]
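The context snippets above document the AbstractLoss interface: a leaf loss module only needs to implement log_name() and an itemwise forward() over the source, learned, and decoded activations, and the base class then handles batch/component reduction and metric logging. Below is a minimal sketch of such a leaf module, assuming the package layout shown in the snippets; the class name and the squared-error choice are illustrative and not taken from this record.

from jaxtyping import Float
from torch import Tensor

from sparse_autoencoder.loss.abstract_loss import AbstractLoss
from sparse_autoencoder.tensor_types import Axis


class L2ReconstructionLossSketch(AbstractLoss):
    """Illustrative squared-error reconstruction loss per batch item (not from the record)."""

    def log_name(self) -> str:
        # Used as the postfix when the base class logs this loss as a metric.
        return "l2_reconstruction_loss_sketch"

    def forward(
        self,
        source_activations: Float[
            Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
        ],
        learned_activations: Float[
            Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)
        ],
        decoded_activations: Float[
            Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)
        ],
    ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:
        # Itemwise loss: mean squared error over the feature axis, keeping the
        # batch (and optional component) axes so the base class can reduce them.
        return ((decoded_activations - source_activations) ** 2).mean(dim=-1)

The batch_loss and scalar_loss_with_log machinery documented above would then reduce this per-item tensor over the batch and component axes and attach the log_name postfix to the resulting metric.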
from collections.abc import Iterator from functools import partial from pathlib import Path from tempfile import gettempdir from typing import TYPE_CHECKING, final from deepspeed import DeepSpeedEngine from jaxtyping import Float, Int, Int64 from pydantic import NonNegativeInt, PositiveInt, validate_call from torch import Tensor from torch.nn.parallel import DataParallel from torch.optim.lr_scheduler import LRScheduler from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformer_lens import HookedTransformer from sparse_autoencoder.activation_resampler.activation_resampler import ( ActivationResampler, ParameterUpdateResults, ) from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore from sparse_autoencoder.autoencoder.model import SparseAutoencoder from sparse_autoencoder.loss.abstract_loss import AbstractLoss, LossReductionType from sparse_autoencoder.metrics.metrics_container import MetricsContainer, default_metrics from sparse_autoencoder.metrics.train.abstract_train_metric import TrainMetricData from sparse_autoencoder.metrics.validate.abstract_validate_metric import ValidationMetricData from sparse_autoencoder.optimizer.abstract_optimizer import AbstractOptimizerWithReset from sparse_autoencoder.source_data.abstract_dataset import SourceDataset, TorchTokenizedPrompts from sparse_autoencoder.source_model.replace_activations_hook import replace_activations_hook from sparse_autoencoder.source_model.store_activations_hook import store_activations_hook from sparse_autoencoder.source_model.zero_ablate_hook import zero_ablate_hook from sparse_autoencoder.tensor_types import Axis from sparse_autoencoder.train.utils.get_model_device import get_model_device from sparse_autoencoder.metrics.abstract_metric import MetricResult import torch import wandb
18,535
"""Default pipeline.""" if TYPE_CHECKING: DEFAULT_CHECKPOINT_DIRECTORY: Path = Path(gettempdir()) / "sparse_autoencoder" class Pipeline: """Pipeline for training a Sparse Autoencoder on TransformerLens activations. Includes all the key functionality to train a sparse autoencoder, with a specific set of hyperparameters. """ activation_resampler: ActivationResampler | None """Activation resampler to use.""" autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine """Sparse autoencoder to train.""" n_input_features: int """Number of input features in the sparse autoencoder.""" n_learned_features: int """Number of learned features in the sparse autoencoder.""" cache_names: list[str] """Names of the cache hook points to use in the source model.""" layer: int """Layer to stope the source model at (if we don't need activations after this layer).""" log_frequency: int """Frequency at which to log metrics (in steps).""" loss: AbstractLoss """Loss function to use.""" metrics: MetricsContainer """Metrics to use."""
"""Default pipeline.""" if TYPE_CHECKING: DEFAULT_CHECKPOINT_DIRECTORY: Path = Path(gettempdir()) / "sparse_autoencoder" class Pipeline: """Pipeline for training a Sparse Autoencoder on TransformerLens activations. Includes all the key functionality to train a sparse autoencoder, with a specific set of hyperparameters. """ activation_resampler: ActivationResampler | None """Activation resampler to use.""" autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine """Sparse autoencoder to train.""" n_input_features: int """Number of input features in the sparse autoencoder.""" n_learned_features: int """Number of learned features in the sparse autoencoder.""" cache_names: list[str] """Names of the cache hook points to use in the source model.""" layer: int """Layer to stope the source model at (if we don't need activations after this layer).""" log_frequency: int """Frequency at which to log metrics (in steps).""" loss: AbstractLoss """Loss function to use.""" metrics: MetricsContainer """Metrics to use."""
optimizer: AbstractOptimizerWithReset
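For orientation, the next_line value above continues the annotated attribute block shown in cropped_code/all_code. The truncated sketch below shows where it lands in pipeline.py, keeping only the last attribute shown plus the ground-truth continuation; the elisions and the class docstring here are illustrative, not from the record.

from sparse_autoencoder.metrics.metrics_container import MetricsContainer
from sparse_autoencoder.optimizer.abstract_optimizer import AbstractOptimizerWithReset


class Pipeline:
    """Truncated illustration of the Pipeline class from this record."""

    metrics: MetricsContainer
    """Metrics to use."""

    # Ground-truth continuation recorded in this record's next_line field:
    optimizer: AbstractOptimizerWithReset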
9
2023-10-27 07:37:15+00:00
24k
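Before the next record, a usage sketch of the SparseAutoencoder Hugging Face helpers documented in this record's context. Only load_from_hugging_face is named explicitly in the snippet; the saving helper is referred to here as save_to_hugging_face, which is an assumption about its name inferred from its docstring and body. The repo id, file name, checkpoint directory, and token handling are placeholder assumptions, not values from the record.

from pathlib import Path

from sparse_autoencoder.autoencoder.model import SparseAutoencoder


def upload_and_reload(sae: SparseAutoencoder, hf_access_token: str) -> SparseAutoencoder:
    """Push a trained autoencoder's state file to a model repo, then load it back."""
    # Save the .pt state file locally and upload it to the (placeholder) repo.
    # Note: the method name save_to_hugging_face is assumed; its arguments match
    # the docstring shown in the context snippet (file_name, repo_id, directory,
    # hf_access_token).
    sae.save_to_hugging_face(
        file_name="sae-demo.pt",        # placeholder file name
        repo_id="my-user/my-sae-repo",  # placeholder repo id
        directory=Path("checkpoints"),  # placeholder local directory
        hf_access_token=hf_access_token,
    )
    # Retrieve the same state file from the repo and rebuild the model.
    return SparseAutoencoder.load_from_hugging_face(
        file_name="sae-demo.pt",
        repo_id="my-user/my-sae-repo",
    )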
OATML-Markslab/ProteinNPT
scripts/train.py
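The context snippets that follow document ProteinNPTModel.forward_with_uncertainty, which estimates predictive uncertainty via MC dropout: keep the model in eval mode, switch only Dropout modules back to train mode, run several stochastic forward passes, and report the per-sample mean and standard deviation. Below is a generic, self-contained sketch of that scheme; the toy model and inputs are placeholders, not ProteinNPT's.

import torch
import torch.nn as nn


def mc_dropout_predict(model: nn.Module, inputs: torch.Tensor, num_samples: int = 10):
    """Return (mean, std) of `num_samples` stochastic forward passes."""
    model.eval()
    # Re-enable stochasticity for dropout layers only, mirroring the loop over
    # modules whose class name starts with "Dropout" in the snippet below.
    for module in model.modules():
        if module.__class__.__name__.startswith("Dropout"):
            module.train()
    with torch.no_grad():
        samples = torch.stack([model(inputs) for _ in range(num_samples)], dim=-1)
    return samples.mean(dim=-1), samples.std(dim=-1)


if __name__ == "__main__":
    toy = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Dropout(0.2), nn.Linear(16, 1))
    mean, std = mc_dropout_predict(toy, torch.randn(4, 8))
    print(mean.shape, std.shape)  # torch.Size([4, 1]) torch.Size([4, 1])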
[ { "identifier": "ProteinNPTModel", "path": "proteinnpt/model.py", "snippet": "class ProteinNPTModel(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.padding_idx = alphabet.padding_idx\n self.mask_idx = alphabet.mask_idx\n self.cls_idx = alphabet.cls_idx\n self.eos_idx = alphabet.eos_idx\n self.prepend_bos = alphabet.prepend_bos\n self.append_eos = alphabet.append_eos\n self.target_names_input = self.args.target_config.keys()\n self.target_names = [x for x in self.args.target_config.keys() if self.args.target_config[x][\"in_NPT_loss\"]]\n self.num_targets_input = len(self.target_names_input) #Includes all targets, incl. zero-shot fitness predictions\n self.num_targets = len(self.target_names) #Number of actual targets we want to predict\n self.MSA_sample_sequences = None\n self.training_sample_sequences_indices = None\n self.device = None\n self.optimizer = None\n self.model_type = args.model_type\n self.PNPT_ensemble_test_num_seeds = -1\n self.PNPT_no_reconstruction_error = False\n self.deactivate_col_attention = False\n self.tranception_attention = False\n \n assert self.args.embed_dim % self.args.attention_heads ==0, \"Embedding size {} needs to be a multiple of number of heads {}\".format(self.args.embed_dim, self.args.attention_heads)\n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n model, _ = utils.esm.pretrained.load_model_and_alphabet(args.embedding_model_location)\n self.aa_embedding = model\n self.aa_embedding_dim = self.aa_embedding.embed_tokens.weight.shape[-1]\n elif self.args.aa_embeddings == \"Tranception\":\n self.aa_embedding_dim = 1280\n config = json.load(open(args.embedding_model_location+os.sep+'config.json'))\n config = utils.tranception.config.TranceptionConfig(**config)\n config.tokenizer = self.alphabet\n config.inference_time_retrieval_type = None\n config.retrieval_aggregation_mode = None\n self.aa_embedding = utils.tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=args.embedding_model_location,config=config)\n elif self.args.aa_embeddings == \"Linear_embedding\":\n self.aa_embedding = nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n )\n self.aa_positions_embedding = LearnedPositionalEmbedding(\n self.args.max_positions,\n self.args.embed_dim,\n self.padding_idx,\n )\n self.aa_embedding_dim = self.args.embed_dim\n\n if self.aa_embedding_dim != self.args.embed_dim: #Need to project internally\n self.token_embedding_projection = nn.Linear(\n self.aa_embedding_dim,\n self.args.embed_dim\n )\n self.token_embedding_expansion = nn.Linear(\n self.args.embed_dim,\n self.aa_embedding_dim\n )\n\n self.target_embedding = nn.ModuleDict(\n { \n target_name:\n nn.Linear(\n self.args.target_config[target_name][\"dim\"] + 1, #Need to add one as we append the mask flag to each input target \n self.args.embed_dim\n )\n if self.args.target_config[target_name][\"type\"]==\"continuous\"\n else \n nn.Embedding(\n self.args.target_config[target_name][\"dim\"],\n self.args.embed_dim\n )\n for target_name in self.target_names_input\n }\n )\n \n self.dropout_module = nn.Dropout(self.args.dropout)\n\n self.layers = nn.ModuleList(\n [\n AxialTransformerLayer(\n self.args.embed_dim,\n self.args.ffn_embed_dim,\n self.args.attention_heads,\n self.args.dropout,\n self.args.attention_dropout,\n self.args.activation_dropout,\n getattr(self.args, \"max_tokens_per_msa\", 
self.args.max_tokens_per_msa),\n self.deactivate_col_attention,\n self.tranception_attention,\n self.num_targets_input,\n )\n for _ in range(self.args.num_protein_npt_layers)\n ]\n )\n self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)\n self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)\n \n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n weight = self.aa_embedding.embed_tokens.weight\n elif self.args.aa_embeddings == \"Tranception\":\n weight = self.aa_embedding.lm_head.weight\n else:\n weight = self.aa_embedding.weight\n\n self.lm_head = RobertaLMHead(\n embed_dim=self.aa_embedding_dim,\n output_dim=self.alphabet_size,\n weight=weight\n )\n \n target_pred_input_dim = self.args.embed_dim\n\n if args.target_prediction_model==\"MLP\": \n self.layer_pre_head = nn.ModuleDict(\n {\n target_name:\n nn.Sequential(\n nn.Linear(target_pred_input_dim, target_pred_input_dim),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n ) \n for target_name in self.target_names\n }\n )\n \n if args.target_prediction_model==\"ConvBERT\":\n configuration = ConvBertConfig(\n hidden_size = self.args.embed_dim,\n num_attention_heads = self.args.attention_heads,\n conv_kernel_size = self.args.conv_kernel_size,\n hidden_act = \"gelu\",\n hidden_dropout_prob = self.args.dropout,\n attention_probs_dropout_prob = self.args.dropout\n )\n self.layer_pre_head = ConvBertLayer(configuration)\n \n if args.target_prediction_model==\"CNN\":\n self.layer_pre_head = nn.Sequential(\n nn.Conv1d(in_channels=target_pred_input_dim, out_channels=target_pred_input_dim, kernel_size = self.args.conv_kernel_size, padding='same'),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n \n if self.args.target_prediction_head == \"Target_embeddings_only\":\n target_pred_input_dim = target_pred_input_dim\n elif self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\":\n target_pred_input_dim = target_pred_input_dim * (1 + self.num_targets_input)\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n self.zero_shot_fitness_prediction_weight = nn.ModuleDict(\n { \n target_name: nn.Linear(1, self.args.target_config[target_name][\"dim\"], bias=False)\n for target_name in self.target_names\n }\n )\n for target_name in self.target_names:\n torch.nn.init.constant_(self.zero_shot_fitness_prediction_weight[target_name].weight,1e-4)\n\n self.target_pred_head = nn.ModuleDict(\n { \n target_name: nn.Linear(target_pred_input_dim, self.args.target_config[target_name][\"dim\"])\n for target_name in self.target_names\n }\n )\n \n def set_device(self):\n if self.device is None:\n self.device = next(self.parameters()).device\n print(\"Model device: {}\".format(self.device))\n \n def forward(self, tokens, targets=None, zero_shot_fitness_predictions=None, sequence_embeddings=None, repr_layers=[], need_head_weights=False):\n padding_mask = tokens.eq(self.padding_idx) \n if not padding_mask.any(): padding_mask = None\n \n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None:\n assert tokens.ndim == 3, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n num_MSAs_in_batch, num_sequences_in_alignments, seqlen = tokens.size() # N, B, L (seqs with labels, seqs in MSA, seq length)\n batch_size = num_MSAs_in_batch\n else:\n assert tokens.ndim == 2, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n batch_size, seqlen = tokens.size() # N, L (seqs with labels, seq length)\n \n if sequence_embeddings is not None:\n 
x = sequence_embeddings.to(self.device)\n else:\n if self.args.aa_embeddings == \"MSA_Transformer\":\n output = self.aa_embedding(tokens, repr_layers=[12])\n x = output[\"representations\"][12][:] # N, B, L, D\n x = x[:,0,:,:] # N, L, D. #In each MSA batch the first sequence is what we care about. The other MSA sequences were just to compute embeddings and logits\n elif self.args.aa_embeddings == \"ESM1v\":\n last_layer_index = 33\n output = self.aa_embedding(tokens, repr_layers=[last_layer_index])\n x = output[\"representations\"][last_layer_index][:] # N, L, D\n elif self.args.aa_embeddings ==\"Linear_embedding\":\n x = self.aa_embedding(tokens)\n x = x + self.aa_positions_embedding(tokens.view(batch_size, seqlen)).view(x.size()) # Need position embedding in PNPT since we will apply axial attention\n else:\n print(\"AA embeddings not recognized\")\n sys.exit(0)\n \n if self.aa_embedding_dim != self.args.embed_dim: x = self.token_embedding_projection(x)\n \n if self.args.target_prediction_head != \"Target_embeddings_and_AA_embeddings_mean_pooled\": #We mix AA embeddings pre NPT\n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n\n x = x.view(1, batch_size, seqlen, self.args.embed_dim) # 1, N, L, D\n \n #Dimensions for each target (there are self.num_targets of them):\n y = []\n for target_name in self.target_names_input:\n num_sequences_with_target, dim_targets = targets[target_name].shape # N, D_t #In most cases dim_targets = D_t = 2 (original dimension of continuous input + 1 dim for mask)\n y.append(self.target_embedding[target_name](targets[target_name]).view(num_sequences_with_target,1,self.args.embed_dim))\n y = torch.cat(y, dim=-2) #concatenate across second to last dimension # N, num_targets, D\n assert y.shape == (num_sequences_with_target, self.num_targets_input, self.args.embed_dim), \"Error in y shape: {}\".format(y.shape)\n y = y.view(1, num_sequences_with_target, self.num_targets_input, self.args.embed_dim) # 1, N, num_targets, D\n \n #Concatenate AA tokens and targets\n x = torch.cat((x,y),dim=-2) # 1, N, (L+num_targets), D\n x = self.emb_layer_norm_before(x)\n x = self.dropout_module(x)\n\n if padding_mask is not None:\n padding_mask_with_targets = torch.zeros(num_MSAs_in_batch, num_sequences_in_alignments, seqlen + self.num_targets_input)\n padding_mask_with_targets[...,:seqlen] = padding_mask\n padding_mask = padding_mask_with_targets\n x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))\n \n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers: hidden_representations[0] = x\n if need_head_weights:\n row_attn_weights = []\n col_attn_weights = []\n\n # 1 x N x L x D -> N x L x 1 x D\n x = x.permute(1, 2, 0, 3)\n for layer_idx, layer in enumerate(self.layers):\n x = layer(\n x,\n self_attn_padding_mask=padding_mask,\n need_head_weights=need_head_weights,\n )\n if need_head_weights:\n x, col_attn, row_attn = x\n col_attn_weights.append(col_attn.permute(2, 0, 1, 3, 4).cpu())\n row_attn_weights.append(row_attn.permute(1, 0, 2, 3).cpu())\n if (layer_idx + 1) in repr_layers:\n hidden_representations[layer_idx + 1] = x.permute(2, 0, 1, 3)\n x = self.emb_layer_norm_after(x)\n x = x.permute(2, 0, 1, 3) # N x L x 1 x D -> 1 x N x L x D\n assert x.shape == (1, num_sequences_with_target, seqlen + 
self.num_targets_input, self.args.embed_dim), \"Error with axial transformer\"\n # last hidden representation should have layer norm applied\n if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx + 1] = x\n \n # Loss over NPT MLM objective\n if self.aa_embedding_dim != self.args.embed_dim:\n logits_protein_sequence = self.lm_head(self.token_embedding_expansion(x[...,:seqlen,:]))\n else:\n logits_protein_sequence = self.lm_head(x[...,:seqlen,:]) #Remove dependency on targets for final AA predictions. logits size: (1, N, L, Vocab)\n \n x = x.view(num_sequences_with_target, seqlen + self.num_targets_input, self.args.embed_dim)\n x, y = x[:,:seqlen,:], x[:,seqlen:,:] # (N,L,D) and (N,num_targets,D)\n assert y.shape == (num_sequences_with_target, self.num_targets_input, self.args.embed_dim)\n if self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\": \n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n x = x.mean(dim=-2) # N, D\n y = y.view(num_sequences_with_target,self.num_targets_input * self.args.embed_dim)\n y = torch.cat((x,y),dim=-1) # N, (1+num_targets) * D\n \n target_predictions = {}\n for target_index, target_name in enumerate(self.target_names):\n if self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\": \n target_predictions[target_name] = self.target_pred_head[target_name](y).view(-1) #We use the concatenated X and target embeddings (all of them) to predict each target\n else:\n if self.args.target_prediction_model == \"MLP\": y[:,target_index,:] = self.layer_pre_head[target_name](y[:,target_index,:])\n target_predictions[target_name] = self.target_pred_head[target_name](y[:,target_index,:]).view(-1) #input the embedding with the relevant target_index\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n target_predictions[target_name] += self.zero_shot_fitness_prediction_weight[target_name](zero_shot_fitness_predictions).squeeze()\n \n result = {\"logits_protein_sequence\": logits_protein_sequence, \"target_predictions\": target_predictions, \"representations\": hidden_representations}\n \n if need_head_weights:\n col_attentions = torch.stack(col_attn_weights, 1)\n row_attentions = torch.stack(row_attn_weights, 1)\n result[\"col_attentions\"] = col_attentions\n result[\"row_attentions\"] = row_attentions\n\n return result\n\n def forward_with_uncertainty(self, tokens, targets, zero_shot_fitness_predictions=None, sequence_embeddings=None, num_MC_dropout_samples=10, number_of_mutated_seqs_to_score=None):\n \"\"\"\n Performs MC dropout to compute predictions and the corresponding uncertainties.\n Assumes 1D predictions (eg., prediction of continuous output)\n \"\"\"\n self.eval() \n for m in self.modules(): #Move all dropout layers in train mode to support MC dropout. 
Keep everything else in eval mode.\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n with torch.no_grad():\n predictions_dict = defaultdict(list)\n for _ in range(num_MC_dropout_samples):\n target_predictions_sample = self.forward(tokens, targets, zero_shot_fitness_predictions=zero_shot_fitness_predictions, sequence_embeddings=sequence_embeddings)[\"target_predictions\"]\n for target_name in self.target_names:\n predictions_dict[target_name].append(target_predictions_sample[target_name])\n results_with_uncertainty={}\n for target_name in self.target_names:\n concatenated_target_pred = torch.cat([x.view(-1,1) for x in predictions_dict[target_name]],dim=-1)\n results_with_uncertainty[target_name] = {}\n results_with_uncertainty[target_name]['predictions_avg'] = concatenated_target_pred.mean(dim=-1)\n results_with_uncertainty[target_name]['uncertainty'] = concatenated_target_pred.std(dim=-1)\n return results_with_uncertainty\n \n @property\n def num_layers(self):\n return self.args.num_protein_npt_layers\n \n def max_tokens_per_msa_(self, value: int) -> None:\n \"\"\"\n Batching attention computations when gradients are disabled as per MSA_Transformer\n Set this value to infinity to disable this behavior.\n \"\"\"\n for module in self.modules():\n if isinstance(module, (RowSelfAttention, ColumnSelfAttention)):\n module.max_tokens_per_msa = value\n\n def protein_npt_loss(self, token_predictions_logits, token_labels, target_predictions, target_labels, MLM_reconstruction_loss_weight, label_smoothing=0.0):\n target_prediction_loss_weight = 1.0 - MLM_reconstruction_loss_weight\n total_loss = 0.0\n if (token_labels is not None) and (MLM_reconstruction_loss_weight > 0.0):\n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None: token_labels = token_labels[:,0,:] #Only keep the token labels for seq to score. Drops the token labels for MSA sequences\n masked_lm_loss = CrossEntropyLoss(reduction=\"mean\", label_smoothing=label_smoothing)(token_predictions_logits.reshape(-1, self.alphabet_size), token_labels.reshape(-1))\n reconstruction_loss = masked_lm_loss\n total_loss += MLM_reconstruction_loss_weight * reconstruction_loss\n else:\n reconstruction_loss = torch.tensor(0.0)\n target_prediction_loss = {}\n for target_name in self.target_names:\n if self.args.target_config[target_name][\"in_NPT_loss\"]:\n if self.args.target_config[target_name][\"type\"]==\"continuous\":\n loss_masked_targets = ~target_labels[target_name].eq(-100) #Masked items are the ones for which the label was not set to -100\n if loss_masked_targets.sum()==0 or torch.isnan(target_labels[target_name][loss_masked_targets]).sum() > 0: #First condition true if we dont mask anything (eg., all target missing at eval). 
Second condition true if we force-mask one value at train time (to satisfy min_num_labels_masked in mast_target()) and corresponding target value is missing\n tgt_loss = torch.tensor(0.0)\n else:\n tgt_loss = MSELoss(reduction=\"mean\")(target_predictions[target_name][loss_masked_targets], target_labels[target_name][loss_masked_targets]) #we do not average the loss per batch, so that it's easier to do 1 full average across all batches\n if torch.isnan(tgt_loss).sum() > 0:\n print(\"Detected nan loss\")\n print(target_predictions[target_name])\n else:\n tgt_loss = CrossEntropyLoss(reduction=\"mean\", label_smoothing=label_smoothing)(target_predictions[target_name].view(-1, self.args.target_config[target_name][\"dim\"]), target_labels[target_name].view(-1)) # Note: we dont add one to the # of categories in the CE loss here (we dont predict <mask>)\n target_prediction_loss[target_name] = tgt_loss\n \n total_loss += target_prediction_loss_weight * target_prediction_loss[target_name]\n return total_loss, reconstruction_loss, target_prediction_loss\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n Adapted from Huggingface Transformers library.\n \"\"\"\n if self.optimizer is None:\n all_parameters = utils.model_utils.get_parameter_names(self, [nn.LayerNorm])\n decay_parameters = [name for name in all_parameters if (\"bias\" not in name and \"pseudo_likelihood_weight\" not in name and 'zero_shot_fitness_prediction_weight' not in name)]\n psl_decay_parameters = [name for name in all_parameters if (\"bias\" not in name and (\"pseudo_likelihood_weight\" in name or \"zero_shot_fitness_prediction_weight\" in name))]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.named_parameters() if n in decay_parameters],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.named_parameters() if n in psl_decay_parameters],\n \"weight_decay\": 1e-8, #Small decay on pseudo-likelihood as in Hsu et al.\n },\n {\n \"params\": [p for n, p in self.named_parameters() if (n not in decay_parameters and n not in psl_decay_parameters)],\n \"weight_decay\": 0.0,\n },\n ] \n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n \"lr\": self.args.max_learning_rate\n }\n optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)\n return optimizer" }, { "identifier": "AugmentedPropertyPredictor", "path": "baselines/model.py", "snippet": "class AugmentedPropertyPredictor(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n print(\"Alphabet: {}\".format(alphabet))\n print(\"Alphabet size: {}\".format(self.alphabet_size))\n self.padding_idx = alphabet.padding_idx\n self.mask_idx = alphabet.mask_idx\n self.cls_idx = alphabet.cls_idx\n self.eos_idx = alphabet.eos_idx\n self.prepend_bos = alphabet.prepend_bos\n self.append_eos = alphabet.append_eos\n self.target_names = self.args.target_config.keys() \n self.MSA_sample_sequences = None \n self.device = None\n self.model_type = args.model_type \n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n model, _ = utils.esm.pretrained.load_model_and_alphabet(args.embedding_model_location)\n self.aa_embedding = model\n if 
self.args.aa_embeddings == \"MSA_Transformer\": self.args.seq_len = self.args.MSA_seq_len #If MSA does not cover full sequence length, we adjust seq_len param to be MSA_len (sequences truncated as needed in preprocessing)\n elif self.args.aa_embeddings == \"Linear_embedding\":\n self.aa_embedding = nn.Sequential(\n nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n ),\n nn.ReLU()\n )\n elif self.args.aa_embeddings == \"One_hot_encoding\":\n self.args.target_prediction_head == \"One_hot_encoding\"\n elif self.args.aa_embeddings == \"Tranception\":\n self.aa_embedding_dim = 1280\n config = json.load(open(args.embedding_model_location+os.sep+'config.json'))\n config = utils.tranception.config.TranceptionConfig(**config)\n config.tokenizer = get_tranception_tokenizer()\n config.inference_time_retrieval_type = None\n config.retrieval_aggregation_mode = None\n self.aa_embedding = utils.tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=args.embedding_model_location,config=config)\n self.config = config\n else:\n print(\"Error: Specified AA embedding invalid\")\n sys.exit(0)\n\n if self.args.aa_embeddings != \"One_hot_encoding\": \n self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)\n self.dropout_module = nn.Dropout(self.args.dropout)\n\n if self.args.target_prediction_head == \"AA_embeddings_mean_pooled\":\n target_pred_input_dim = self.args.embed_dim\n elif self.args.target_prediction_head == \"One_hot_encoding\":\n target_pred_input_dim = (self.args.seq_len + 1) * self.alphabet_size if args.target_prediction_model!=\"CNN\" else self.alphabet_size #Add one for the BOS token\n else:\n print(self.args.target_prediction_head)\n print(\"Error: Specified embedding aggregation invalid\")\n sys.exit(0)\n \n if args.target_prediction_model==\"MLP\":\n self.layer_pre_head = nn.Sequential(\n nn.Linear(target_pred_input_dim, target_pred_input_dim),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n elif args.target_prediction_model==\"ConvBERT\":\n configuration = ConvBertConfig(\n hidden_size = self.args.embed_dim,\n num_attention_heads = self.args.attention_heads if self.args.attention_heads is not None else 4,\n conv_kernel_size = self.args.conv_kernel_size,\n hidden_act = \"gelu\",\n hidden_dropout_prob = self.args.dropout,\n attention_probs_dropout_prob = self.args.dropout\n )\n self.layer_pre_head = ConvBertLayer(configuration)\n elif args.target_prediction_model==\"CNN\":\n self.layer_pre_head = nn.Sequential(\n nn.Conv1d(in_channels=target_pred_input_dim, out_channels=target_pred_input_dim, kernel_size = self.args.conv_kernel_size, padding='same'),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n target_pred_input_dim = target_pred_input_dim if self.args.target_prediction_head != \"One_hot_encoding\" else target_pred_input_dim * (self.args.seq_len + 1)\n elif args.target_prediction_model==\"light_attention\":\n # Adapted from Stark et al (https://github.com/HannesStark/protein-localization)\n self.feature_convolution = nn.Conv1d(self.args.embed_dim, self.args.embed_dim, self.args.conv_kernel_size, stride=1, padding='same')\n self.attention_convolution = nn.Conv1d(self.args.embed_dim, self.args.embed_dim, self.args.conv_kernel_size, stride=1, padding='same')\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = nn.Dropout(self.args.dropout)\n self.linear = nn.Sequential(\n nn.Linear(2 * self.args.embed_dim, 32),\n nn.Dropout(self.args.dropout),\n nn.ReLU(),\n nn.BatchNorm1d(32)\n )\n 
target_pred_input_dim = 32\n elif args.target_prediction_model==\"linear\":\n pass\n else:\n print(\"Error: Specified layer_pre_head invalid\")\n sys.exit(0)\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n self.zero_shot_fitness_prediction_weight = nn.ModuleDict(\n { \n target_name: nn.Linear(1, self.args.target_config[target_name][\"dim\"], bias=False)\n for target_name in self.target_names\n }\n )\n for target_name in self.target_names:\n torch.nn.init.constant_(self.zero_shot_fitness_prediction_weight[target_name].weight,1.0)\n\n self.target_pred_head = nn.ModuleDict(\n { \n target_name: nn.Linear(target_pred_input_dim, self.args.target_config[target_name][\"dim\"])\n for target_name in self.target_names #If multiple targets, we learn a separate linear head for each separately\n }\n )\n \n def set_device(self):\n if self.device is None:\n self.device = next(self.parameters()).device\n print(\"Model device: {}\".format(self.device))\n\n def forward(self, tokens, zero_shot_fitness_predictions=None, sequence_embeddings=None, repr_layers=[]):\n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None:\n assert tokens.ndim == 3, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n num_MSAs_in_batch, num_sequences_in_alignments, seqlen = tokens.size()\n batch_size = num_MSAs_in_batch\n else:\n assert tokens.ndim == 2, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n batch_size, seqlen = tokens.size()\n \n if sequence_embeddings is not None:\n x = sequence_embeddings.to(self.device)\n else:\n if self.args.aa_embeddings == \"MSA_Transformer\":\n output = self.aa_embedding(tokens, repr_layers=[12])\n x = output[\"representations\"][12][:] # B, N, L, D\n x = x[:,0,:,:] #In each MSA batch the first sequence is what we care about. 
The other MSA sequences were just to compute embeddings and logits\n elif self.args.aa_embeddings == \"ESM1v\":\n last_layer_index = 33\n output = self.aa_embedding(tokens, repr_layers=[last_layer_index])\n x = output[\"representations\"][last_layer_index][:] # N, L, D\n elif self.args.aa_embeddings == \"Tranception\":\n processed_batch = {'input_ids': tokens, 'labels': tokens}\n output = self.aa_embedding(**processed_batch, return_dict=True, output_hidden_states=True)\n x = output.hidden_states[0]\n elif self.args.aa_embeddings ==\"Linear_embedding\":\n x = self.aa_embedding(tokens)\n elif self.args.aa_embeddings == \"One_hot_encoding\":\n x = nn.functional.one_hot(tokens, num_classes=self.alphabet_size).view(batch_size,-1).float()\n if self.args.target_prediction_model == \"CNN\": x = x.view(batch_size,seqlen,self.alphabet_size)\n\n if self.args.aa_embeddings != \"One_hot_encoding\":\n x = self.emb_layer_norm_after(x)\n x = self.dropout_module(x)\n \n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers:\n hidden_representations[0] = x\n\n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n elif self.args.target_prediction_model==\"light_attention\":\n x = x.permute(0,2,1) #N, D, L\n o = self.feature_convolution(x) \n o = self.dropout(o)\n attention = self.attention_convolution(x)\n o1 = torch.sum(o * self.softmax(attention), dim=-1)\n o2, _ = torch.max(o, dim=-1)\n o = torch.cat([o1, o2], dim=-1)\n x = self.linear(o)\n \n if self.args.target_prediction_head == \"AA_embeddings_mean_pooled\": x = x.mean(dim=-2)\n \n if self.args.target_prediction_model == \"MLP\": x = self.layer_pre_head(x)\n \n target_predictions = {}\n for target_name in self.target_names:\n target_predictions[target_name] = self.target_pred_head[target_name](x).view(-1)\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n target_predictions[target_name] += self.zero_shot_fitness_prediction_weight[target_name](zero_shot_fitness_predictions).squeeze()\n\n result = {\"target_predictions\": target_predictions, \"representations\": hidden_representations}\n \n return result\n \n def forward_with_uncertainty(self, tokens, zero_shot_fitness_predictions=None, sequence_embeddings=None, num_MC_dropout_samples=10):\n \"\"\"\n Performs MC dropout to compute predictions and the corresponding uncertainties.\n Assumes 1D predictions (eg., prediction of continuous output).\n \"\"\"\n self.eval() \n for m in self.modules(): #Move all dropout layers in train mode to support MC dropout. 
Keep everything else in eval mode.\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n with torch.no_grad(): \n predictions_dict = defaultdict(list)\n for _ in range(num_MC_dropout_samples):\n target_predictions_sample = self.forward(tokens, zero_shot_fitness_predictions=zero_shot_fitness_predictions, sequence_embeddings=sequence_embeddings)[\"target_predictions\"]\n for target_name in self.target_names:\n predictions_dict[target_name].append(target_predictions_sample[target_name])\n results_with_uncertainty={}\n for target_name in self.target_names:\n concatenated_target_pred = torch.cat([x.view(-1,1) for x in predictions_dict[target_name]],dim=-1)\n results_with_uncertainty[target_name] = {}\n results_with_uncertainty[target_name]['predictions_avg'] = concatenated_target_pred.mean(dim=-1)\n results_with_uncertainty[target_name]['uncertainty'] = concatenated_target_pred.std(dim=-1)\n return results_with_uncertainty\n\n @property\n def num_layers(self):\n return self.args.num_protein_npt_layers\n \n def max_tokens_per_msa_(self, value: int) -> None:\n \"\"\"\n Batching attention computations when gradients are disabled as per MSA_Transformer\n Set this value to infinity to disable this behavior.\n \"\"\"\n for module in self.modules():\n if isinstance(module, (RowSelfAttention, ColumnSelfAttention)):\n module.max_tokens_per_msa = value\n\n def prediction_loss(self, target_predictions, target_labels, label_smoothing=0.1):\n total_target_prediction_loss = 0.0\n target_prediction_loss_dict = {}\n for target_name in self.target_names:\n non_missing_target_indicator = ~torch.isnan(target_labels[target_name])\n if self.args.target_config[target_name][\"type\"]==\"continuous\":\n tgt_loss = MSELoss(reduction=\"sum\")(target_predictions[target_name][non_missing_target_indicator], target_labels[target_name][non_missing_target_indicator])\n else:\n tgt_loss = CrossEntropyLoss(reduction=\"none\",label_smoothing=label_smoothing)(target_predictions[target_name].view(-1, self.args.target_config[target_name][\"dim\"]), target_labels[target_name].view(-1))\n target_prediction_loss_dict[target_name] = tgt_loss\n total_target_prediction_loss += tgt_loss\n return total_target_prediction_loss, target_prediction_loss_dict\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n Adapted from Huggingface Transformers library.\n \"\"\"\n all_parameters = utils.model_utils.get_parameter_names(self, [nn.LayerNorm])\n decay_parameters = [name for name in all_parameters if (\"bias\" not in name and \"pseudo_likelihood_weight\" not in name and 'zero_shot_fitness_prediction_weight' not in name)]\n psl_decay_parameters = [name for name in all_parameters if (\"bias\" not in name and (\"pseudo_likelihood_weight\" in name or \"zero_shot_fitness_prediction_weight\" in name))]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.named_parameters() if n in decay_parameters],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.named_parameters() if n in psl_decay_parameters],\n \"weight_decay\": 1e-8, #Small decay on pseudo-likelihood as in Hsu et al.\n },\n {\n \"params\": [p for n, p in self.named_parameters() if (n not in decay_parameters and n not in psl_decay_parameters)],\n \"weight_decay\": 0.0,\n },\n ] \n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n \"lr\": self.args.max_learning_rate\n }\n optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)\n return optimizer" }, { "identifier": "Alphabet", "path": "utils/esm/data.py", "snippet": "class Alphabet(object):\n def __init__(\n self,\n standard_toks: Sequence[str],\n prepend_toks: Sequence[str] = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\"),\n append_toks: Sequence[str] = (\"<cls>\", \"<mask>\", \"<sep>\"),\n prepend_bos: bool = True,\n append_eos: bool = False,\n use_msa: bool = False,\n ):\n #ESM Alphabet: {'<cls>': 0, '<pad>': 1, '<eos>': 2, '<unk>': 3, 'L': 4, 'A': 5, 'G': 6, 'V': 7, 'S': 8, 'E': 9, 'R': 10, 'T': 11, 'I': 12, 'D': 13, 'P': 14, 'K': 15, 'Q': 16, 'N': 17, 'F': 18, 'Y': 19, 'M': 20, 'H': 21, 'W': 22, 'C': 23, 'X': 24, 'B': 25, 'U': 26, 'Z': 27, 'O': 28, '.': 29, '-': 30, '<null_1>': 31, '<mask>': 32}\n self.standard_toks = list(standard_toks)\n self.prepend_toks = list(prepend_toks)\n self.append_toks = list(append_toks)\n self.prepend_bos = prepend_bos\n self.append_eos = append_eos\n self.use_msa = use_msa\n\n self.all_toks = list(self.prepend_toks)\n self.all_toks.extend(self.standard_toks)\n for i in range((8 - (len(self.all_toks) % 8)) % 8):\n self.all_toks.append(f\"<null_{i + 1}>\")\n self.all_toks.extend(self.append_toks)\n\n self.tok_to_idx = {tok: i for i, tok in enumerate(self.all_toks)}\n\n self.unk_idx = self.tok_to_idx[\"<unk>\"]\n self.padding_idx = self.get_idx(\"<pad>\")\n self.cls_idx = self.get_idx(\"<cls>\")\n self.mask_idx = self.get_idx(\"<mask>\")\n self.eos_idx = self.get_idx(\"<eos>\")\n self.all_special_tokens = ['<eos>', '<unk>', '<pad>', '<cls>', '<mask>']\n self.unique_no_split_tokens = self.all_toks\n\n def __len__(self):\n return len(self.all_toks)\n\n def get_idx(self, tok):\n return self.tok_to_idx.get(tok, self.unk_idx)\n\n def get_tok(self, ind):\n return self.all_toks[ind]\n\n def to_dict(self):\n return self.tok_to_idx.copy()\n\n def get_batch_converter(self, truncation_seq_length: int = None):\n if self.use_msa:\n return MSABatchConverter(self, truncation_seq_length)\n else:\n return BatchConverter(self, truncation_seq_length)\n\n @classmethod\n def from_architecture(cls, name: str) -> \"Alphabet\":\n if name in (\"ESM-1\", \"protein_bert_base\"):\n standard_toks = 
proteinseq_toks[\"toks\"]\n prepend_toks: Tuple[str, ...] = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks: Tuple[str, ...] = (\"<cls>\", \"<mask>\", \"<sep>\")\n prepend_bos = True\n append_eos = False\n use_msa = False\n elif name in (\"ESM-1b\", \"roberta_large\"):\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = True\n use_msa = False\n elif name in (\"MSA Transformer\", \"msa_transformer\"):\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = False\n use_msa = True\n elif \"invariant_gvp\" in name.lower():\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\", \"<cath>\", \"<af2>\")\n prepend_bos = True\n append_eos = False\n use_msa = False\n else:\n raise ValueError(\"Unknown architecture selected\")\n return cls(standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa)\n\n def _tokenize(self, text) -> str:\n return text.split()\n\n def tokenize(self, text, **kwargs) -> List[str]:\n \"\"\"\n Inspired by https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py\n Converts a string in a sequence of tokens, using the tokenizer.\n\n Args:\n text (:obj:`str`):\n The sequence to be encoded.\n\n Returns:\n :obj:`List[str]`: The list of tokens.\n \"\"\"\n\n def split_on_token(tok, text):\n result = []\n split_text = text.split(tok)\n for i, sub_text in enumerate(split_text):\n # AddedToken can control whitespace stripping around them.\n # We use them for GPT2 and Roberta to have different behavior depending on the special token\n # Cf. 
https://github.com/huggingface/transformers/pull/2778\n # and https://github.com/huggingface/transformers/issues/3788\n # We strip left and right by default\n if i < len(split_text) - 1:\n sub_text = sub_text.rstrip()\n if i > 0:\n sub_text = sub_text.lstrip()\n\n if i == 0 and not sub_text:\n result.append(tok)\n elif i == len(split_text) - 1:\n if sub_text:\n result.append(sub_text)\n else:\n pass\n else:\n if sub_text:\n result.append(sub_text)\n result.append(tok)\n return result\n\n def split_on_tokens(tok_list, text):\n if not text.strip():\n return []\n\n tokenized_text = []\n text_list = [text]\n for tok in tok_list:\n tokenized_text = []\n for sub_text in text_list:\n if sub_text not in self.unique_no_split_tokens:\n tokenized_text.extend(split_on_token(tok, sub_text))\n else:\n tokenized_text.append(sub_text)\n text_list = tokenized_text\n\n return list(\n itertools.chain.from_iterable(\n (\n self._tokenize(token)\n if token not in self.unique_no_split_tokens\n else [token]\n for token in tokenized_text\n )\n )\n )\n\n no_split_token = self.unique_no_split_tokens\n tokenized_text = split_on_tokens(no_split_token, text)\n return tokenized_text\n\n def encode(self, text):\n return [self.tok_to_idx[tok] for tok in self.tokenize(text)]" }, { "identifier": "get_tranception_tokenizer", "path": "utils/tranception/model_pytorch.py", "snippet": "def get_tranception_tokenizer():\n #Tranception Alphabet: \"vocab\":{\"[UNK]\":0,\"[CLS]\":1,\"[SEP]\":2,\"[PAD]\":3,\"[MASK]\":4,\"A\":5,\"C\":6,\"D\":7,\"E\":8,\"F\":9,\"G\":10,\"H\":11,\"I\":12,\"K\":13,\"L\":14,\"M\":15,\"N\":16,\"P\":17,\"Q\":18,\"R\":19,\"S\":20,\"T\":21,\"V\":22,\"W\":23,\"Y\":24}\n dir_path = os.path.dirname(os.path.abspath(__file__))\n tokenizer = PreTrainedTokenizerFast(tokenizer_file=dir_path + os.sep + \"utils/tokenizers/Basic_tokenizer\", unk_token=\"[UNK]\", sep_token=\"[SEP]\", pad_token=\"[PAD]\", cls_token=\"[CLS]\",mask_token=\"[MASK]\")\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n tokenizer.tok_to_idx = tokenizer.vocab\n tokenizer.padding_idx = tokenizer.tok_to_idx[\"[PAD]\"]\n tokenizer.mask_idx = tokenizer.tok_to_idx[\"[MASK]\"]\n tokenizer.cls_idx = tokenizer.tok_to_idx[\"[CLS]\"]\n tokenizer.eos_idx = tokenizer.tok_to_idx[\"[SEP]\"]\n tokenizer.prepend_bos = True\n tokenizer.append_eos = True\n return tokenizer" }, { "identifier": "get_train_val_test_data", "path": "utils/data_utils.py", "snippet": "def get_train_val_test_data(args, assay_file_names):\n target_names = args.target_config.keys() \n assay_data={}\n merge = None\n main_target_name = None\n main_target_name_count = 0\n for target in target_names:\n if args.target_config[target][\"main_target\"]: \n main_target_name=target\n main_target_name_count+=1\n assert main_target_name is not None, \"No main target referenced. Please update config to select a unique main target.\"\n assert main_target_name_count <= 1, \"Several main targets referenced. 
Please update config to select a unique main target.\"\n \n assay_data[main_target_name] = pd.read_csv(args.target_config[main_target_name][\"location\"] + os.sep + assay_file_names[main_target_name])[['mutant','mutated_sequence',args.target_config[main_target_name][\"var_name\"],args.fold_variable_name]] \n assay_data[main_target_name].columns = ['mutant','mutated_sequence', main_target_name, args.fold_variable_name]\n merge = assay_data[main_target_name]\n \n for target_name in target_names:\n if target_name!=main_target_name:\n print(target_name)\n print(args.target_config)\n print(assay_file_names)\n assay_data[target_name] = pd.read_csv(args.target_config[target_name][\"location\"] + os.sep + assay_file_names[target_name])[['mutant',args.target_config[target_name][\"var_name\"]]] \n assay_data[target_name].columns = ['mutant',target_name]\n merge = pd.merge(merge, assay_data[target_name], how='left', on='mutant')\n \n if args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = pd.read_csv(args.zero_shot_fitness_predictions_location + os.sep + assay_file_names[main_target_name])[['mutant',args.zero_shot_fitness_predictions_var_name]]\n zero_shot_fitness_predictions.columns = ['mutant','zero_shot_fitness_predictions']\n zero_shot_fitness_predictions['zero_shot_fitness_predictions'] = standardize(zero_shot_fitness_predictions['zero_shot_fitness_predictions'])\n merge = pd.merge(merge,zero_shot_fitness_predictions,how='inner',on='mutant')\n\n train_val_test_splits = split_data_based_on_test_fold_index(\n dataframe = merge, \n fold_variable_name = args.fold_variable_name,\n test_fold_index = args.test_fold_index,\n use_validation_set = args.use_validation_set\n )\n splits_dict = {}\n for split_name, split in zip(['train','val','test'], train_val_test_splits):\n if split_name=='val' and not args.use_validation_set: continue\n splits_dict[split_name] = {}\n splits_dict[split_name]['mutant_mutated_seq_pairs'] = list(zip(list(split['mutant']),list(split['mutated_sequence'])))\n raw_targets = {target_name: split[target_name] for target_name in target_names}\n if args.augmentation==\"zero_shot_fitness_predictions_covariate\": raw_targets['zero_shot_fitness_predictions'] = split['zero_shot_fitness_predictions']\n if split_name==\"train\":\n raw_targets, target_processing = preprocess_training_targets(raw_targets, args.target_config)\n else:\n raw_targets = preprocess_test_targets(raw_targets, args.target_config, target_processing)\n for target_name in target_names: \n splits_dict[split_name][target_name] = raw_targets[target_name]\n if args.augmentation==\"zero_shot_fitness_predictions_covariate\": splits_dict[split_name]['zero_shot_fitness_predictions'] = raw_targets['zero_shot_fitness_predictions']\n # load dict into dataset objects\n train_data = Dataset.from_dict(splits_dict['train'])\n val_data = Dataset.from_dict(splits_dict['val']) if args.use_validation_set else None\n test_data = Dataset.from_dict(splits_dict['test'])\n return train_data, val_data, test_data, target_processing" }, { "identifier": "standardize", "path": "utils/data_utils.py", "snippet": "def standardize(x):\n return (x - x.mean()) / x.std()" }, { "identifier": "pnpt_count_non_nan", "path": "utils/data_utils.py", "snippet": "def pnpt_count_non_nan(x):\n missing_mask = np.isnan(x) | np.equal(x,-100)\n return np.count_nonzero(~missing_mask)" }, { "identifier": "pnpt_spearmanr", "path": "utils/data_utils.py", "snippet": "def pnpt_spearmanr(prediction,target):\n mask_missing_values = 
np.isnan(target) | np.equal(target, -100) #In PNPT missing values are never masked so corresponding labels are always set to -100\n return spearmanr(prediction[~mask_missing_values], target[~mask_missing_values])[0] #first value is spearman rho, second is the corresponding p-value " }, { "identifier": "process_MSA", "path": "utils/msa_utils.py", "snippet": "def process_MSA(args, MSA_filename, MSA_weights_filename):\n filtered_MSA_filename = filter_msa(filename = args.MSA_data_folder + os.sep + MSA_filename, path_to_hhfilter = args.path_to_hhfilter)\n MSA_all_sequences, MSA_non_ref_sequences_weights = compute_sequence_weights(MSA_filename = filtered_MSA_filename, MSA_weights_filename = args.MSA_weight_data_folder + os.sep + MSA_weights_filename)\n return MSA_all_sequences, MSA_non_ref_sequences_weights" }, { "identifier": "Trainer", "path": "utils/model_utils.py", "snippet": "class Trainer():\n def __init__(self, \n model,\n args,\n train_data, \n val_data,\n MSA_sequences, \n MSA_weights,\n MSA_start_position,\n MSA_end_position,\n target_processing,\n distributed_training=False\n ):\n self.model = model\n self.args = args\n self.train_data = train_data\n self.val_data = val_data\n self.MSA_sequences = MSA_sequences\n self.MSA_weights = MSA_weights\n self.MSA_start_position = MSA_start_position\n self.MSA_end_position = MSA_end_position\n self.target_processing = target_processing\n self.distributed_training = distributed_training\n \n def train(self):\n \"\"\"\n Returns the last value of training_step (useful in case of early stopping for isntance)\n \"\"\"\n \n self.model.train()\n self.model.cuda()\n self.model.set_device()\n\n if self.distributed_training:\n self.model = torch.nn.parallel.DistributedDataParallel(self.model)\n train_sampler = torch.utils.data.distributed.DistributedSampler(self.train_data)\n else:\n train_sampler = None\n \n #To ensure reproducibility with seed setting\n def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2**32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n g = torch.Generator()\n g.manual_seed(0)\n train_loader = torch.utils.data.DataLoader(\n dataset=self.train_data, \n batch_size=self.args.training_num_assay_sequences_per_batch_per_gpu, \n shuffle=(train_sampler is None),\n num_workers=self.args.num_data_loaders_workers, \n pin_memory=True, \n sampler=train_sampler,\n collate_fn=collate_fn_protein_npt,\n worker_init_fn=seed_worker,\n generator=g,\n )\n optimizer = self.model.create_optimizer()\n scheduler = learning_rate_scheduler(\n num_warmup_steps=self.args.num_warmup_steps, \n num_total_training_steps=self.args.num_total_training_steps, \n max_learning_rate=self.args.max_learning_rate, \n min_learning_rate=self.args.min_learning_rate\n )\n \n train_iterator = iter(train_loader)\n num_epochs = 0\n prior_log_time = time.time()\n total_train_time = 0\n log_train_total_loss = 0\n if self.model.model_type==\"ProteinNPT\":\n log_train_reconstruction_loss = 0\n log_train_num_masked_tokens = 0\n log_train_num_target_masked_tokens_dict = defaultdict(int)\n else:\n log_num_sequences_predicted = 0\n log_train_target_prediction_loss_dict = defaultdict(int)\n all_spearmans_eval_during_training = []\n max_average_spearman_across_targets = - math.inf\n if self.args.training_fp16: scaler = torch.cuda.amp.GradScaler()\n\n for training_step in tqdm.tqdm(range(1, self.args.num_total_training_steps+1)):\n optimizer.zero_grad(set_to_none=True)\n lr = scheduler(training_step)\n update_lr_optimizer(optimizer, lr)\n reconstruction_loss_coeff = 
get_reconstruction_loss_coefficient(training_step, num_total_training_steps=self.args.num_total_training_steps) if (self.model.model_type==\"ProteinNPT\" and not self.model.PNPT_no_reconstruction_error) else 0\n for gradient_accum_step in range(self.args.gradient_accumulation):\n try:\n batch = next(train_iterator)\n except:\n num_epochs +=1\n train_iterator = iter(train_loader)\n batch = next(train_iterator)\n \n if self.model.model_type==\"ProteinNPT\":\n processed_batch = proteinnpt.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n target_processing = self.target_processing,\n training_sequences = None,\n proba_target_mask = 0.15,\n proba_aa_mask = 0.15,\n eval_mode = False,\n device=self.model.device,\n indel_mode=self.args.indel_mode\n )\n else:\n processed_batch = baselines.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n device=self.model.device,\n eval_mode=False,\n indel_mode=self.args.indel_mode\n )\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = processed_batch['target_labels']['zero_shot_fitness_predictions'].view(-1,1)\n del processed_batch['target_labels']['zero_shot_fitness_predictions']\n else:\n zero_shot_fitness_predictions = None\n \n if self.args.training_fp16:\n with torch.cuda.amp.autocast():\n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, reconstruction_loss, target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n MLM_reconstruction_loss_weight=reconstruction_loss_coeff, \n label_smoothing=self.args.label_smoothing\n )\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n scaler.scale(total_loss).backward()\n else:\n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, reconstruction_loss, target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n 
MLM_reconstruction_loss_weight=reconstruction_loss_coeff, \n label_smoothing=self.args.label_smoothing\n )\n if total_loss.item() > 10.0 and training_step >= 100:\n print(\"High training loss detected: {}\".format(total_loss.item()))\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.grad_norm_clip)\n # Taking optimizer update out of the inner loop to support gradient accumulation\n if self.args.training_fp16:\n with torch.cuda.amp.autocast():\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n\n log_train_total_loss += total_loss\n for target_name in self.model.target_names:\n log_train_target_prediction_loss_dict[target_name] += target_prediction_loss_dict[target_name]\n if self.model.model_type==\"ProteinNPT\": \n log_train_reconstruction_loss += reconstruction_loss\n log_train_num_masked_tokens += processed_batch['masked_tokens'].eq(self.model.alphabet.mask_idx).sum()\n for target_name in self.model.target_names:\n log_train_num_target_masked_tokens_dict[target_name] += processed_batch['masked_targets'][target_name][:,-1].eq(1.0).sum().item() # Masked targets are encoded by 1.0. Mask column is the very last one\n else:\n log_num_sequences_predicted += len(batch['mutant_mutated_seq_pairs'])\n \n if training_step % self.args.num_logging_training_steps == 0 and self.args.use_wandb:\n time_end_step = time.time()\n delta_time_since_last_log = time_end_step - prior_log_time\n total_train_time += delta_time_since_last_log\n prior_log_time = time_end_step\n train_logs = {\n \"training_step\": training_step, \n \"step_time\": delta_time_since_last_log / (self.args.num_logging_training_steps)\n }\n if self.model.model_type==\"ProteinNPT\": \n train_logs[\"train_total_loss_per_step\"]: log_train_total_loss / self.args.num_logging_training_steps\n train_logs[\"train_reconstruction_loss_per_masked_token\"] = log_train_reconstruction_loss / log_train_num_masked_tokens\n for target_name in self.model.target_names:\n train_logs[\"train_prediction_\"+str(target_name)+\"_loss_per_masked_token\"] = log_train_target_prediction_loss_dict[target_name] / log_train_num_target_masked_tokens_dict[target_name]\n else:\n train_logs[\"train_total_loss_per_seq\"]: log_train_total_loss / log_num_sequences_predicted\n for target_name in self.model.target_names:\n train_logs[\"train_prediction_\"+str(target_name)+\"_loss_per_seq\"] = log_train_target_prediction_loss_dict[target_name] / log_num_sequences_predicted\n wandb.log(train_logs)\n log_train_total_loss = 0\n log_train_target_prediction_loss_dict = defaultdict(int)\n if self.model.model_type==\"ProteinNPT\":\n log_train_reconstruction_loss = 0\n log_train_num_masked_tokens = 0\n log_train_num_target_masked_tokens_dict = defaultdict(int)\n else:\n log_num_sequences_predicted = 0 \n \n if self.args.save_model_checkpoint and (training_step % self.args.num_saving_training_steps) == 0:\n if not os.path.exists(self.args.model_location): os.mkdir(self.args.model_location)\n if not os.path.exists(self.args.model_location + os.sep + 'checkpoint-'+str(training_step)): 
os.mkdir(self.args.model_location + os.sep + 'checkpoint-'+str(training_step))\n torch.save({\n 'training_step': training_step,\n 'args': self.args,\n 'state_dict': self.model.state_dict(),\n 'optimizer' : optimizer.state_dict()\n }, \n self.args.model_location + os.sep + 'checkpoint-'+str(training_step) + os.sep + 'checkpoint.t7'\n )\n \n if training_step % self.args.num_eval_steps == 0 and self.args.use_validation_set:\n if self.model.model_type==\"ProteinNPT\":\n eval_results = self.eval(\n test_data=self.val_data,\n train_data=self.train_data,\n reconstruction_loss_weight=0.0,\n output_all_predictions=True\n )\n else:\n eval_results = self.eval(\n test_data=self.val_data, \n output_all_predictions=True\n )\n eval_logs = {\"Training step\": training_step} \n if self.model.model_type==\"ProteinNPT\":\n normalization = 0\n for target_name in self.model.target_names: normalization += eval_results['eval_num_masked_targets'][target_name]\n else:\n normalization = eval_results['eval_num_predicted_targets']\n eval_logs['Eval total loss per seq.']: eval_results['eval_total_loss'] / normalization\n average_spearman_across_targets = 0 #If early stopping based on validation spearman and multiple targets, we check that avg spearman is not decreasing for a certain # of times in a row\n for target_name in self.model.target_names:\n if self.model.model_type==\"ProteinNPT\": normalization = eval_results['eval_num_masked_targets'][target_name] #Update for PNPT (keeep the same normalization constant otherwise)\n eval_logs['Eval loss '+str(target_name)+' per seq.'] = eval_results['eval_target_prediction_loss_dict'][target_name] / normalization\n eval_logs['Eval spearman '+target_name] = spearmanr(eval_results['output_scores']['predictions_'+target_name], eval_results['output_scores']['labels_'+target_name])[0]\n average_spearman_across_targets += eval_logs['Eval spearman '+target_name]\n average_spearman_across_targets /= len(self.model.target_names)\n print(\" | \".join([key + \": \"+str(round(eval_logs[key],5)) for key in eval_logs.keys()]))\n if self.args.use_wandb: wandb.log(eval_logs)\n # Early stopping\n all_spearmans_eval_during_training.append(average_spearman_across_targets)\n if average_spearman_across_targets > max_average_spearman_across_targets: max_average_spearman_across_targets = average_spearman_across_targets\n if (training_step >= 1000) and (self.args.early_stopping_patience is not None) and (np.array(all_spearmans_eval_during_training)[-self.args.early_stopping_patience:].max() < max_average_spearman_across_targets):\n print(\"Early stopping. Training step: {}. Total eval loss: {}. 
Avg spearman: {}\".format(training_step, eval_results['eval_total_loss'], average_spearman_across_targets))\n break\n self.model.train() #Move back the model to train mode after eval loop\n trainer_final_status = {\n 'total_training_steps': training_step,\n 'total_train_time': total_train_time,\n 'total_training_epochs': num_epochs\n }\n return trainer_final_status\n\n def eval(self, test_data, output_all_predictions=False, need_head_weights=False, train_data = None, reconstruction_loss_weight=0.5, selected_indices_seed=0):\n \"\"\"\n total_eval_target_prediction_loss is the sum of all target prediction losses across all targets\n total_eval_target_prediction_loss contains the breakdown by target\n num_predicted_targets has the number of predicted items\n output_scores is a dict with sequences, predictions and labels\n \"\"\"\n self.model.eval()\n self.model.cuda()\n self.model.set_device()\n with torch.no_grad():\n eval_loader = torch.utils.data.DataLoader(\n dataset=test_data, \n batch_size=self.args.eval_num_sequences_to_score_per_batch_per_gpu, \n shuffle=False,\n num_workers=self.args.num_data_loaders_workers, \n pin_memory=True,\n collate_fn=collate_fn_protein_npt\n )\n eval_iterator = iter(eval_loader)\n \n eval_total_loss = 0\n if self.model.model_type==\"ProteinNPT\": \n eval_reconstruction_loss = 0\n eval_num_masked_tokens = 0\n eval_num_masked_targets = defaultdict(int)\n else:\n num_predicted_targets = 0\n eval_target_prediction_loss_dict = defaultdict(int)\n output_scores = defaultdict(list) if output_all_predictions else None\n\n if need_head_weights:\n col_attentions=[]\n row_attentions=[]\n\n for batch in tqdm.tqdm(eval_iterator):\n if output_all_predictions: \n output_scores['mutated_sequence'] += list(zip(*batch['mutant_mutated_seq_pairs']))[1]\n output_scores['mutant'] += list(zip(*batch['mutant_mutated_seq_pairs']))[0]\n if self.model.model_type==\"ProteinNPT\":\n processed_batch = proteinnpt.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n target_processing = self.target_processing,\n training_sequences = train_data,\n proba_target_mask = 1.0, \n proba_aa_mask = 0.0,\n eval_mode = True,\n device=self.model.device,\n selected_indices_seed=selected_indices_seed,\n indel_mode=self.args.indel_mode\n )\n else:\n processed_batch = baselines.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n device=self.model.device,\n eval_mode=True,\n indel_mode=self.args.indel_mode\n )\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = processed_batch['target_labels']['zero_shot_fitness_predictions'].view(-1,1)\n del processed_batch['target_labels']['zero_shot_fitness_predictions']\n else:\n zero_shot_fitness_predictions = None\n \n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings'],\n need_head_weights=need_head_weights\n )\n 
batch_loss, batch_reconstruction_loss, batch_target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n MLM_reconstruction_loss_weight=reconstruction_loss_weight, \n label_smoothing=self.args.label_smoothing\n )\n if batch_loss.item() > 10.0:\n print(\"High eval loss detected: {}\".format(batch_loss.item()))\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n batch_loss, batch_target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n \n eval_total_loss += batch_loss.item()\n for target_name in self.model.target_names:\n eval_target_prediction_loss_dict[target_name] += batch_target_prediction_loss_dict[target_name].item()\n if self.model.model_type==\"ProteinNPT\":\n eval_reconstruction_loss += batch_reconstruction_loss.item()\n eval_num_masked_tokens += processed_batch['masked_tokens'].eq(self.model.alphabet.mask_idx).sum().item()\n for target_name in self.model.target_names:\n eval_num_masked_targets[target_name] += processed_batch['masked_targets'][target_name][:,-1].eq(1.0).sum().item()\n else:\n num_predicted_targets += len(batch['mutant_mutated_seq_pairs'])\n if output_all_predictions:\n num_of_mutated_seqs_to_score = processed_batch['num_of_mutated_seqs_to_score'] if self.model.model_type==\"ProteinNPT\" else len(processed_batch['mutant_mutated_seq_pairs'])\n for target_name in self.model.target_names:\n output_scores['predictions_'+target_name] += list(output[\"target_predictions\"][target_name][:num_of_mutated_seqs_to_score].cpu().numpy())\n output_scores['labels_'+target_name] += list(processed_batch['target_labels'][target_name][:num_of_mutated_seqs_to_score].cpu().numpy())\n if need_head_weights:\n col_attentions.append(output[\"col_attentions\"])\n row_attentions.append(output[\"row_attentions\"])\n\n output_scores = pd.DataFrame.from_dict(output_scores)\n output_scores_numeric_cols = [col_name for col_name in output_scores.columns if col_name not in ['mutant','mutated_sequence']]\n output_scores = output_scores.groupby(['mutant'])[output_scores_numeric_cols].mean().reset_index() \n mutated_seqs_dict = {}\n mutant_mutated_seqs = list(zip(*test_data['mutant_mutated_seq_pairs']))\n mutated_seqs_dict['mutant'] = mutant_mutated_seqs[0]\n mutated_seqs_dict['mutated_sequence'] = mutant_mutated_seqs[1]\n mutated_seqs_df = pd.DataFrame.from_dict(mutated_seqs_dict)\n output_scores = pd.merge(output_scores, mutated_seqs_df, on='mutant', how='left')\n \n\n eval_results = {\n 'eval_total_loss':eval_total_loss,\n 'eval_target_prediction_loss_dict':eval_target_prediction_loss_dict,\n 'output_scores': output_scores\n }\n if need_head_weights:\n print(\"dimension of first attention column {}\".format(col_attentions[0].shape))\n eval_results['col_attentions'] = torch.stack(col_attentions, dim=0).cpu().numpy()\n eval_results['row_attentions'] = torch.stack(row_attentions, dim=0).cpu().numpy()\n \n if self.model.model_type==\"ProteinNPT\":\n eval_results['eval_reconstruction_loss']=eval_reconstruction_loss\n eval_results['eval_num_masked_tokens']=eval_num_masked_tokens\n 
eval_results['eval_num_masked_targets']=eval_num_masked_targets\n else:\n eval_results['eval_num_predicted_targets']=num_predicted_targets\n return eval_results" } ]
import os,gc import json import argparse import random import numpy as np import pandas as pd import wandb import torch import proteinnpt,baselines,utils from collections import defaultdict from proteinnpt.model import ProteinNPTModel from baselines.model import AugmentedPropertyPredictor from utils.esm.data import Alphabet from utils.tranception.model_pytorch import get_tranception_tokenizer from utils.data_utils import get_train_val_test_data, standardize, pnpt_count_non_nan, pnpt_spearmanr from utils.msa_utils import process_MSA from utils.model_utils import Trainer
18292
def setup_config_and_paths(args): # All parameters that are not defined by end user are fetched from the config file if args.model_config_location is not None: args.main_config=json.load(open(args.model_config_location)) for key in args.main_config: if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key] # File paths config for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']: if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path)) if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions') if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint') args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location) args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix if not os.path.exists(args.model_location): os.mkdir(args.model_location) # Target config args.target_config=json.load(open(args.target_config_location)) zero_shot_predictions_mapping={ "MSA_Transformer_pred": "MSA_Transformer_ensemble", "ESM1v_pred": "ESM1v_ensemble", "TranceptEVE_pred": "TranceptEVE_L", "Tranception_pred": "Tranception_L", "DeepSequence_pred": "DeepSequence_ensemble" } if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"] if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels": # Add auxiliary label to target_config assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced" print("Using zero-shot fitness predictions as auxiliary labels") args.target_config["zero_shot_fitness_predictions"] = { "type": "continuous", "dim": 1, "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions "location": args.zero_shot_fitness_predictions_location, "in_NPT_loss": False, "main_target": False } args.augmentation_short="auxiliary" elif args.augmentation=="zero_shot_fitness_predictions_covariate": # Will use zero-shot fitness predictions as an additional model covariate assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced" print("Using zero-shot fitness predictions as covariate") args.augmentation_short="covariate" args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type] else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} 
if {}".format(target,args.assay_location[target_index])) else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} if {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization
def setup_config_and_paths(args): # All parameters that are not defined by end user are fetched from the config file if args.model_config_location is not None: args.main_config=json.load(open(args.model_config_location)) for key in args.main_config: if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key] # File paths config for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']: if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path)) if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions') if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint') args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location) args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix if not os.path.exists(args.model_location): os.mkdir(args.model_location) # Target config args.target_config=json.load(open(args.target_config_location)) zero_shot_predictions_mapping={ "MSA_Transformer_pred": "MSA_Transformer_ensemble", "ESM1v_pred": "ESM1v_ensemble", "TranceptEVE_pred": "TranceptEVE_L", "Tranception_pred": "Tranception_L", "DeepSequence_pred": "DeepSequence_ensemble" } if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"] if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels": # Add auxiliary label to target_config assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced" print("Using zero-shot fitness predictions as auxiliary labels") args.target_config["zero_shot_fitness_predictions"] = { "type": "continuous", "dim": 1, "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions "location": args.zero_shot_fitness_predictions_location, "in_NPT_loss": False, "main_target": False } args.augmentation_short="auxiliary" elif args.augmentation=="zero_shot_fitness_predictions_covariate": # Will use zero-shot fitness predictions as an additional model covariate assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced" print("Using zero-shot fitness predictions as covariate") args.augmentation_short="covariate" args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type] else: args.augmentation_short="none" for target_index,target in enumerate(args.target_config): if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present if args.assay_location is not None: # We passed at least one path for the assay location num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] if len(args.assay_location) > 1: assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location)) args.target_config[target]["location"] = args.assay_location[target_index] print("Location used for target {} 
if {}".format(target,args.assay_location[target_index])) else: args.target_config[target]["location"] = args.assay_location[0] print("Location used for target {} if {}".format(target,args.assay_location[0])) else: print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles')) args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles' return args def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None): test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']} if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) if args.model_type=="ProteinNPT": normalization = 0 for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name] else: normalization = test_eval_results['eval_num_predicted_targets'] test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization
spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
7
2023-10-28 11:41:05+00:00
24k
CVHub520/yolov5_obb
detect.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # CoreML: *.mlmodel\n # TensorFlow: *_saved_model\n # TensorFlow: *.pb\n # TensorFlow Lite: *.tflite\n # ONNX Runtime: *.onnx\n # OpenCV DNN: *.onnx with dnn=True\n # TensorRT: *.engine\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n suffix = Path(w).suffix.lower()\n suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']\n check_suffix(w, suffixes) # check weights have acceptable suffix\n pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans\n stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults\n w = attempt_download(w) # download if not local\n\n if jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files)\n if extra_files['config.txt']:\n d = json.loads(extra_files['config.txt']) # extra_files dict\n stride, names = int(d['stride']), d['names']\n elif pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)\n stride = int(model.stride.max()) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements(('opencv-python>=4.5.4',))\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n cuda = torch.cuda.is_available()\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '8.0.0', verbose=True) # version requirement\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n bindings = OrderedDict()\n for index in range(model.num_bindings):\n name = model.get_binding_name(index)\n dtype = trt.nptype(model.get_binding_dtype(index))\n shape = tuple(model.get_binding_shape(index))\n data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)\n bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n context = model.create_execution_context()\n batch_size = bindings['images'].shape[0]\n else: # TensorFlow model (TFLite, pb, saved_model)\n if pb: # 
https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs),\n tf.nest.map_structure(x.graph.as_graph_element, outputs))\n\n graph_def = tf.Graph().as_graph_def()\n graph_def.ParseFromString(open(w, 'rb').read())\n frozen_func = wrap_frozen_graph(gd=graph_def, inputs=\"x:0\", outputs=\"Identity:0\")\n elif saved_model:\n LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...')\n import tensorflow as tf\n model = tf.keras.models.load_model(w)\n elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n if 'edgetpu' in w.lower():\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n import tflite_runtime.interpreter as tfli\n delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])\n else:\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n import tensorflow as tf\n interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False, val=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.pt or self.jit: # PyTorch\n y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)\n return y if val else y[0]\n elif self.coreml: # CoreML\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n elif self.onnx: # ONNX\n im = im.cpu().numpy() # torch to numpy\n if self.dnn: # ONNX OpenCV DNN\n self.net.setInput(im)\n y = self.net.forward()\n else: # ONNX Runtime\n y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]\n elif self.engine: # TensorRT\n assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = self.bindings['output'].data\n else: # TensorFlow model (TFLite, pb, saved_model)\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n if self.pb:\n y = self.frozen_func(x=self.tf.constant(im)).numpy()\n elif self.saved_model:\n y = self.model(im, training=False).numpy()\n elif self.tflite:\n input, output = self.input_details[0], self.output_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = 
input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n y = (y.astype(np.float32) - zero_point) * scale # re-scale\n y[..., 0] *= w # x\n y[..., 1] *= h # y\n y[..., 2] *= w # w\n y[..., 3] *= h # h\n y = torch.tensor(y) if isinstance(y, np.ndarray) else y\n return (y, []) if val else y\n\n def warmup(self, imgsz=(1, 3, 640, 640), half=False):\n # Warmup model by running inference once\n if self.pt or self.engine or self.onnx: # warmup types\n if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models\n im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image\n self.forward(im) # warmup" }, { "identifier": "IMG_FORMATS", "path": "utils/datasets.py", "snippet": "IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/datasets.py", "snippet": "VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes" }, { "identifier": "LoadImages", "path": "utils/datasets.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True):\n p = str(Path(path).resolve()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap, s\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadStreams", "path": "utils/datasets.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources) as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.auto = auto\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_file", "path": "utils/general.py", "snippet": "def check_file(file, suffix=''):\n # Search/download file (if necessary) and return path\n check_suffix(file, suffix) # optional\n file = str(file) # convert to str()\n if Path(file).is_file() or file == '': # exists\n return file\n elif file.startswith(('http:/', 'https:/')): # download\n url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/\n file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth\n if Path(file).is_file():\n print(f'Found {url} locally at {file}') # file already exists\n else:\n print(f'Downloading {url} to {file}...')\n torch.hub.download_url_to_file(url, file)\n assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check\n return file\n else: # search\n files = []\n for d in 'data', 'models', 'utils': # search directories\n files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file\n assert len(files), f'File not found: {file}' # assert file was found\n assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\" # assert unique\n return files[0] # return file" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n print(f\"#305 in utils/general.py - s={s}\")\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_imshow", "path": "utils/general.py", "snippet": "def check_imshow():\n # Check if environment supports image displays\n try:\n assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n cv2.imshow('test', np.zeros((1, 1, 3)))\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n return True\n except Exception as e:\n print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n return False" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. 
runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n path = Path(f\"{path}{sep}{n}{suffix}\") # increment path\n if mkdir:\n path.mkdir(parents=True, exist_ok=True) # make directory\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using 
weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "non_max_suppression_obb", "path": "utils/general.py", "snippet": "def non_max_suppression_obb(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=1500):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results_obb\n Args:\n prediction (tensor): (b, n_all_anchors, [cx cy l s obj num_cls theta_cls])\n agnostic (bool): True = NMS will be applied between elements of different categories\n labels : () or\n\n Returns:\n list of detections, len=batch_size, on (n,7) tensor per image [xylsθ, conf, cls] θ ∈ [-pi/2, pi/2)\n \"\"\"\n\n nc = prediction.shape[2] - 5 - 180 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n class_index = nc + 5\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n max_wh = 4096 # min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 30.0 # seconds to quit after\n # redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [torch.zeros((0, 7), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence, (tensor): (n_conf_thres, [cx cy l s obj num_cls theta_cls])\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:class_index] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n thete_index, theta_pred = torch.max(x[:, class_index:], 1, keepdim=True) # [n_conf_thres, 1] θ ∈ int[0, 179]\n theta_pred = (theta_pred - 90) / 180 * pi # [n_conf_thres, 1] θ ∈ [-pi/2, pi/2)\n\n # Detections matrix nx7 (xyls, θ, conf, cls) θ ∈ [-pi/2, pi/2)\n if multi_label:\n i, j = (x[:, 5:class_index] > conf_thres).nonzero(as_tuple=False).T # ()\n x = torch.cat((x[i, :4], theta_pred[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:class_index].max(1, keepdim=True)\n x = torch.cat((x[:, :4], theta_pred, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 6:7] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # 
excess boxes\n x = x[x[:, 5].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 6:7] * (0 if agnostic else max_wh) # classes\n rboxes = x[:, :5].clone() \n rboxes[:, :2] = rboxes[:, :2] + c # rboxes (offset by class)\n scores = x[:, 5] # scores\n _, i = obb_nms(rboxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(name, opt):\n # Print argparser arguments\n LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "scale_polys", "path": "utils/general.py", "snippet": "def scale_polys(img1_shape, polys, img0_shape, ratio_pad=None):\n # ratio_pad: [(h_raw, w_raw), (hw_ratios, wh_paddings)]\n # Rescale coords (xyxyxyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = resized / raw\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0] # h_ratios\n pad = ratio_pad[1] # wh_paddings\n\n polys[:, [0, 2, 4, 6]] -= pad[0] # x padding\n polys[:, [1, 3, 5, 7]] -= pad[1] # y padding\n polys[:, :8] /= gain # Rescale poly shape to img0_shape\n #clip_polys(polys, img0_shape)\n return polys" }, { "identifier": "strip_optimizer", "path": "utils/general.py", "snippet": "def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\n # Strip optimizer from 'f' to finalize training, optionally save as 's'\n x = torch.load(f, map_location=torch.device('cpu'))\n if x.get('ema'):\n x['model'] = x['ema'] # replace model with ema\n for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys\n x[k] = None\n x['epoch'] = -1\n x['model'].half() # to FP16\n for p in x['model'].parameters():\n p.requires_grad = False\n torch.save(x, s or f)\n mb = os.path.getsize(s or f) / 1E6 # filesize\n print(f\"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB\")" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "CONFIG_DIR = user_config_dir() # Ultralytics settings dir\nRANK = 
int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_font(font='Arial.ttf', size=10):\n def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def poly_label(self, poly, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255)):\n def result(self):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output): #list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=2048, max_subplots=4):\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path(''), img_size=1024):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "poly2rbox", "path": "utils/rboxs_utils.py", "snippet": "def poly2rbox(polys, num_cls_thata=180, radius=6.0, use_pi=False, use_gaussian=False):\n \"\"\"\n Trans poly format to rbox format.\n Args:\n polys (array): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n num_cls_thata (int): [1], theta class num\n radius (float32): [1], window radius for Circular Smooth Label\n use_pi (bool): True θ∈[-pi/2, pi/2) , False θ∈[0, 180)\n\n Returns:\n use_gaussian True:\n rboxes (array): \n csl_labels (array): (num_gts, num_cls_thata)\n elif \n rboxes (array): (num_gts, [cx cy l s θ]) \n \"\"\"\n assert polys.shape[-1] == 8\n if use_gaussian:\n csl_labels = []\n rboxes = []\n for poly in polys:\n poly = np.float32(poly.reshape(4, 2))\n (x, y), (w, h), angle = cv2.minAreaRect(poly) # θ ∈ [0, 90]\n angle = -angle # θ ∈ [-90, 0]\n theta = angle / 180 * pi # 转为pi制\n\n # trans opencv format to longedge format θ ∈ [-pi/2, pi/2]\n if w != max(w, h): \n w, h = h, w\n theta += pi/2\n theta = regular_theta(theta) # limit theta ∈ [-pi/2, pi/2)\n angle = (theta * 180 / pi) + 90 # θ ∈ [0, 180)\n\n if not use_pi: # 采用angle弧度制 θ ∈ [0, 180)\n rboxes.append([x, y, w, h, angle])\n else: # 采用pi制\n rboxes.append([x, y, w, h, theta])\n if use_gaussian:\n csl_label = gaussian_label_cpu(label=angle, num_class=num_cls_thata, u=0, sig=radius)\n csl_labels.append(csl_label)\n if use_gaussian:\n return np.array(rboxes), np.array(csl_labels)\n return np.array(rboxes)" }, { "identifier": "rbox2poly", "path": "utils/rboxs_utils.py", "snippet": "def rbox2poly(obboxes):\n \"\"\"\n Trans rbox format to poly format.\n Args:\n rboxes (array/tensor): (num_gts, [cx cy l s θ]) θ∈[-pi/2, pi/2)\n\n Returns:\n polys (array/tensor): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n \"\"\"\n if isinstance(obboxes, torch.Tensor):\n center, w, h, theta = obboxes[:, :2], obboxes[:, 2:3], obboxes[:, 3:4], obboxes[:, 4:5]\n Cos, Sin = torch.cos(theta), torch.sin(theta)\n\n vector1 = torch.cat(\n (w/2 * Cos, -w/2 * Sin), dim=-1)\n vector2 = torch.cat(\n (-h/2 * Sin, -h/2 * Cos), dim=-1)\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 + vector2\n order = obboxes.shape[:-1]\n return torch.cat(\n (point1, point2, point3, point4), dim=-1).reshape(*order, 8)\n else:\n center, w, h, theta = np.split(obboxes, (2, 3, 4), axis=-1)\n Cos, Sin = np.cos(theta), np.sin(theta)\n\n vector1 = np.concatenate(\n [w/2 * Cos, -w/2 * Sin], axis=-1)\n vector2 = np.concatenate(\n [-h/2 * Sin, -h/2 * Cos], axis=-1)\n\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 
+ vector2\n order = obboxes.shape[:-1]\n return np.concatenate(\n [point1, point2, point3, point4], axis=-1).reshape(*order, 8)" } ]
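The rbox2poly and poly2rbox helpers in the context above carry the core oriented-box geometry: a center, two edge lengths and an angle are expanded into four corner points. Below is a minimal single-box sketch of the rbox-to-poly direction using the same vector construction as the snippet; the function name and the example values are illustrative assumptions, not part of the repository.

import numpy as np

def rbox_to_poly(cx, cy, w, h, theta):
    # Same construction as rbox2poly above, written out for one box.
    # theta is the rotation angle in radians, theta in [-pi/2, pi/2).
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    v1 = np.array([ w / 2 * cos_t, -w / 2 * sin_t])  # half of the long edge
    v2 = np.array([-h / 2 * sin_t, -h / 2 * cos_t])  # half of the short edge
    c = np.array([cx, cy], dtype=float)
    p1, p2 = c + v1 + v2, c + v1 - v2
    p3, p4 = c - v1 - v2, c - v1 + v2
    return np.concatenate([p1, p2, p3, p4])          # [x1 y1 x2 y2 x3 y3 x4 y4]

# An axis-aligned 100x40 box centred at (50, 50) maps to its four corners:
print(rbox_to_poly(50, 50, 100, 40, 0.0))            # [100. 30. 100. 70. 0. 70. 0. 30.]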
import argparse import os import sys import cv2 import torch import torch.backends.cudnn as cudnn from pathlib import Path from models.common import DetectMultiBackend from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, increment_path, non_max_suppression, non_max_suppression_obb, print_args, scale_coords, scale_polys, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, time_sync from utils.rboxs_utils import poly2rbox, rbox2poly
14903
if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
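The inference code above maps predicted polygons from the letterboxed network input back to the original image resolution with scale_polys. A small standalone sketch of that gain-and-padding arithmetic follows; the helper name and the sample shapes are assumptions for illustration only.

import numpy as np

def rescale_polys(resized_shape, polys, raw_shape):
    # Same arithmetic as scale_polys in the context: undo letterbox padding, then undo scaling.
    # resized_shape and raw_shape are (height, width); polys is (n, 8) as [x1 y1 ... x4 y4].
    gain = min(resized_shape[0] / raw_shape[0], resized_shape[1] / raw_shape[1])
    pad_w = (resized_shape[1] - raw_shape[1] * gain) / 2
    pad_h = (resized_shape[0] - raw_shape[0] * gain) / 2
    polys = polys.copy()
    polys[:, 0::2] -= pad_w   # x coordinates (columns 0, 2, 4, 6)
    polys[:, 1::2] -= pad_h   # y coordinates (columns 1, 3, 5, 7)
    polys[:, :8] /= gain
    return polys

# One polygon predicted on a 640x640 letterboxed frame, mapped back to a 1280x720 image:
poly = np.array([[320., 200., 420., 200., 420., 300., 320., 300.]])
print(rescale_polys((640, 640), poly, (720, 1280)))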
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video path/ # directory path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = 
increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
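Before the forward pass, the loop above converts each frame from the loaders (HWC, BGR, uint8) into a contiguous CHW, RGB, float tensor in [0, 1] with a leading batch dimension. A compact sketch of just that preprocessing step, with an invented helper name and a dummy input, is:

import numpy as np
import torch

def preprocess(im0):
    # Mirrors the per-frame tensor preparation in the loop (FP32 only for simplicity).
    im = im0.transpose((2, 0, 1))[::-1]         # HWC to CHW, BGR to RGB
    im = np.ascontiguousarray(im)               # needed after the negative-stride reversal
    im = torch.from_numpy(im).float() / 255     # uint8 0-255 to float 0.0-1.0
    if im.ndim == 3:
        im = im[None]                           # expand for batch dim -> (1, 3, H, W)
    return im

print(preprocess(np.zeros((640, 640, 3), dtype=np.uint8)).shape)  # torch.Size([1, 3, 640, 640])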
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
10
2023-10-31 06:06:41+00:00
24k
serengil/LightPHE
lightphe/models/Ciphertext.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @abstractmethod\n def generate_random_key(self) -> int:\n pass\n\n @abstractmethod\n def encrypt(\n self, plaintext: int, random_key: Union[Optional[int], Optional[list]] = None\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def decrypt(self, ciphertext: Union[int, tuple, list]) -> int:\n pass\n\n @abstractmethod\n def add(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def multiply(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple]:\n pass\n\n @abstractmethod\n def xor(self, ciphertext1: list, ciphertext2: list) -> list:\n pass\n\n @abstractmethod\n def multiply_by_contant(self, ciphertext: Union[int, tuple, list], constant: int) -> int:\n pass\n\n @abstractmethod\n def reencrypt(self, ciphertext: Union[int, tuple, list]) -> Union[int, tuple, list]:\n pass" }, { "identifier": "Algorithm", "path": "lightphe/models/Algorithm.py", "snippet": "class Algorithm:\n RSA = \"RSA\"\n ElGamal = \"ElGamal\"\n ExponentialElGamal = \"Exponential-ElGamal\"\n EllipticCurveElGamal = \"EllipticCurve-ElGamal\"\n Paillier = \"Paillier\"\n DamgardJurik = \"Damgard-Jurik\"\n OkamotoUchiyama = \"Okamoto-Uchiyama\"\n Benaloh = \"Benaloh\"\n NaccacheStern = \"Naccache-Stern\"\n GoldwasserMicali = \"Goldwasser-Micali\"" }, { "identifier": "RSA", "path": "lightphe/cryptosystems/RSA.py", "snippet": "class RSA(Homomorphic):\n \"\"\"\n RSA algorithm is partially homomorphic with respect to the multiplication\n Ref: https://sefiks.com/2023/03/06/a-step-by-step-partially-homomorphic-encryption-example-with-rsa-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size: int = 1024, encrypt_with_public=True):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n encrypt_with_public (boolean): RSA has two keys: private (d) and public (e).\n If you encrypt a message with smo's public, then just that person can decrypt it\n with his private (secure message). 
Otherwise, if you encrypt it with your private,\n one can decrypt it with your public (digital signatures).\n Set this arg to True if you want to do encryption with public key e,\n and do decryption with private key d.\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"n\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n self.encrypt_with_public = encrypt_with_public\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of RSA cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n while True:\n try:\n # picking a prime modulus p and q\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n\n # select public exponent e\n while True:\n e = random.randint(1, phi - 1)\n if math.gcd(e, n) == 1:\n break\n\n d = pow(e, -1, phi)\n break\n except:\n pass\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"e\"] = e\n keys[\"private_key\"][\"d\"] = d\n return keys\n\n def generate_random_key(self) -> int:\n pass\n\n def encrypt(self, plaintext: int) -> int:\n \"\"\"\n Encrypt plain messages with RSA\n Args:\n plaintext (int): plain message\n Returns:\n ciphertext (int): ciphertext encrypted with RSA\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n\n if plaintext > n:\n plaintext = plaintext % n\n logger.debug(\n f\"RSA can encrypt messages [1, {n}]. \"\n f\"Seems you exceeded this limit. New plaintext is {plaintext}\"\n )\n\n if self.encrypt_with_public is True:\n e = self.keys[\"public_key\"][\"e\"]\n c = pow(plaintext, e, n)\n else:\n d = self.keys[\"private_key\"][\"d\"]\n c = pow(plaintext, d, n)\n\n return c\n\n def decrypt(self, ciphertext: int) -> int:\n \"\"\"\n Decrypt ciphertexts with RSA\n Args:\n ciphertext (int): encrypted message\n decrypt_with_private (int): RSA has two keys: private (d) and public (e).\n If you encrypt a message with smo's public, then just that person can decrypt it\n with his private (secure message). 
Otherwise, if you encrypt it with your private,\n one can decrypt it with your public (digital signatures).\n Set this arg to True if you want to do encryption with public key e,\n and do decryption with private key d.\n Returns:\n plaintext (int): restored message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n if self.encrypt_with_public is True:\n d = self.keys[\"private_key\"][\"d\"]\n p = pow(ciphertext, d, n)\n else:\n e = self.keys[\"public_key\"][\"e\"]\n p = pow(ciphertext, e, n)\n\n return p\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic multiplication on encrypted data.\n Result of this must be equal to E(m1 * m2)\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"RSA is not homomorphic with respect to the addition\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"RSA is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n raise ValueError(\"RSA is not supporting multiplying ciphertext by a known constant\")\n\n def reencrypt(self, ciphertext: int) -> int:\n raise ValueError(\"RSA does not support re-encryption\")" }, { "identifier": "ElGamal", "path": "lightphe/cryptosystems/ElGamal.py", "snippet": "class ElGamal(Homomorphic):\n \"\"\"\n ElGamal algorithm is either multiplicatively or additively homomorphic\n Ref: https://sefiks.com/2023/03/27/a-step-by-step-partially-homomorphic-encryption-example-with-elgamal-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, exponential=False, key_size: int = 1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n exponential (boolean): set this to True to make cryptosystem exponential ElGamal.\n Regular ElGamal is homomorphic with respect to the multiplication whereas\n exponential ElGamal is homomorphic with respect to the addition\n \"\"\"\n self.exponential = exponential\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"p\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"p\"]\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of ElGamal cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(100, 2 ** int(key_size / 2) - 1)\n\n # picking a generator g\n g = random.randint(2, int(math.sqrt(p)))\n\n # picking a private key x\n x = random.randint(1, p - 2)\n\n # public key\n y = pow(g, x, p)\n\n keys[\"public_key\"] = {\n \"p\": p,\n \"g\": g,\n \"y\": y,\n }\n\n keys[\"private_key\"] = {\"x\": x}\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n ElGamal requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n p = self.keys[\"public_key\"][\"p\"]\n return random.randint(1, p - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> tuple:\n \"\"\"\n Encrypt plaintext with ElGamal\n Args:\n plaintext (int): message to encrypt\n random_key (int): random key for encryption. 
Do not set this to a static value.\n Returns\n ciphertext (tuple): c1 and c2\n \"\"\"\n p = self.keys[\"public_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n y = self.keys[\"public_key\"][\"y\"]\n r = random_key or self.generate_random_key()\n\n if plaintext > p:\n plaintext = plaintext % p\n logger.debug(\n f\"ElGamal can encrypt messages [1, {p}]. \"\n f\"Seems you exceeded this limit. New plaintext is {plaintext}\"\n )\n\n c1 = pow(g, r, p)\n if self.exponential is False:\n c2 = (plaintext * pow(y, r, p)) % p\n else:\n c2 = (pow(g, plaintext, p) * pow(y, r, p)) % p\n\n return c1, c2\n\n def decrypt(self, ciphertext: tuple) -> int:\n \"\"\"\n Decrypt ciphertext with ElGamal\n Args:\n ciphertext (tuple): c1 and c2\n Returns:\n plaintext (int): restored message\n \"\"\"\n c1, c2 = ciphertext\n\n x = self.keys[\"private_key\"][\"x\"]\n p = self.keys[\"public_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n\n m_prime = (c2 * pow(c1, -1 * x, p)) % p\n\n if self.exponential is False:\n return m_prime\n\n if self.exponential is True:\n # m_prime = g^m . Find m for known m_prime and known g (DLP).\n m = 0\n while True:\n if pow(g, m, p) == m_prime:\n return m\n m += 1\n if m > p:\n raise ValueError(f\"Cannot restore the message in [0, {p}]\")\n\n return -1\n\n def multiply(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic multiplication on encrypted data\n Result of this must be equal to E(m1 * m2)\n Args:\n ciphertext1 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n if self.exponential is True:\n raise ValueError(\"Exponential ElGamal is not homomorphic with respect to the addition\")\n p = self.keys[\"public_key\"][\"p\"]\n return (ciphertext1[0] * ciphertext2[0]) % p, (ciphertext1[1] * ciphertext2[1]) % p\n\n def add(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic addition on encrypted data\n Result of this must be equal to E(m1 + m2)\n Args:\n ciphertext1 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n if self.exponential is False:\n raise ValueError(\"Regular ElGamal is not homomorphic with respect to the addition\")\n p = self.keys[\"public_key\"][\"p\"]\n return (ciphertext1[0] * ciphertext2[0]) % p, (ciphertext1[1] * ciphertext2[1]) % p\n\n def xor(self, ciphertext1: tuple, ciphertext2: tuple) -> int:\n raise ValueError(\"ElGamal is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: tuple, constant: int) -> tuple:\n if self.exponential is False:\n raise ValueError(\"ElGamal is not supporting multiplying ciphertext by a known constant\")\n p = self.keys[\"public_key\"][\"p\"]\n if constant > p:\n constant = constant % p\n logger.debug(\n f\"ElGamal can encrypt messages [1, {p}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n\n return pow(ciphertext[0], constant, p), pow(ciphertext[1], constant, p)\n\n def reencrypt(self, ciphertext: tuple) -> tuple:\n \"\"\"\n Re-generate ciphertext with re-encryption. 
Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n if self.exponential is True:\n # then this is additively homomorphic\n neutral_element = 0\n else:\n # then this is multiplicatively homomorphic\n neutral_element = 1\n\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n\n if self.exponential is True:\n reencrypted_value = self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n else:\n reencrypted_value = self.multiply(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n return reencrypted_value" }, { "identifier": "Paillier", "path": "lightphe/cryptosystems/Paillier.py", "snippet": "class Paillier(Homomorphic):\n \"\"\"\n Paillier algorithm is homomorphic with respect to the addition.\n Also, it supports power operation for ciphertext base and plaintext exponent\n Ref: https://sefiks.com/2023/04/03/a-step-by-step-partially-homomorphic-encryption-example-with-paillier-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n n = self.keys[\"public_key\"][\"n\"]\n self.plaintext_modulo = n\n self.ciphertext_modulo = n * n\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n g = 1 + n\n\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Paillier requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(0, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Paillier\n Args:\n plaintext (int): message to encrypt\n random_key (int): Paillier requires a random key that co-prime to n.\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n r = random_key or self.generate_random_key()\n assert math.gcd(r, n) == 1\n return (pow(g, plaintext, n * n) * pow(r, n, n * n)) % (n * n)\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Paillier\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n mu = pow(phi, -1, n)\n\n return (self.lx(pow(ciphertext, phi, n * n)) * mu) % (n)\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 
+ m2)\n Encryption calculations are done in module n squared.\n Args:\n ciphertext1 (int): 1st ciphertext created with Paillier\n ciphertext2 (int): 2nd ciphertext created with Paillier\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % (n * n)\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Paillier is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Paillier is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * m2) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Paillier\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Paillier can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n\n return pow(ciphertext, constant, n * n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / n\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n y = (x - 1) // n\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "DamgardJurik", "path": "lightphe/cryptosystems/DamgardJurik.py", "snippet": "class DamgardJurik(Homomorphic):\n \"\"\"\n Damgard-Jurik algorithm is a generalization of Paillier.\n It is homomorphic with respect to the addition.\n Ref: https://sefiks.com/2023/10/20/a-step-by-step-partially-homomorphic-encryption-example-with-damgard-jurik-in-python/\n \"\"\"\n\n def __init__(self, s: int = 2, keys: Optional[dict] = None, key_size: int = 1024):\n \"\"\"\n Args:\n s (int): cryptosystem's module is going to be n^(s+1). if s == 1 then this is Paillier\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size=key_size, s=s)\n n = self.keys[\"public_key\"][\"n\"]\n self.plaintext_modulo = n\n self.ciphertext_modulo = pow(n, s + 1)\n\n def generate_keys(self, key_size: int, s: Optional[int] = None):\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n s (int): cryptosystem's module is going to be n^(s+1). 
if s == 1 then this is Paillier\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n g = 1 + n\n\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"s\"] = s\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Paillier requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(0, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Paillier\n Args:\n plaintext (int): message to encrypt\n random_key (int): Paillier requires a random key that co-prime to n.\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n r = random_key or self.generate_random_key()\n modulo = pow(n, s + 1)\n\n # assert math.gcd(r, n) == 1\n c = (pow(g, plaintext, modulo) * pow(r, n, modulo)) % modulo\n # c = (pow(g, plaintext, modulo) * pow(r, pow(n, s), modulo)) % modulo\n if math.gcd(c, modulo) != 1:\n logger.info(f\"WARNING! gcd({c=}, {modulo=}) != 1\")\n return c\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Paillier\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n mu = pow(phi, -1, n)\n modulo = pow(n, s + 1)\n return (self.lx(pow(ciphertext, phi, modulo)) * mu) % (n)\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n squared.\n Args:\n ciphertext1 (int): 1st ciphertext created with Paillier\n ciphertext2 (int): 2nd ciphertext created with Paillier\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n modulo = pow(n, s + 1)\n return (ciphertext1 * ciphertext2) % modulo\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Damgard-Jurik is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Damgard-Jurik is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext by a known plain constant\n Result of this must be equal to E(m1 * m2), where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Damgard-Jurik\n constant (int): a known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Damgard-Jurik\n \"\"\"\n n = 
self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Damgard-Jurik can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, self.ciphertext_modulo)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / n\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n y = (x - 1) // n\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "OkamotoUchiyama", "path": "lightphe/cryptosystems/OkamotoUchiyama.py", "snippet": "class OkamotoUchiyama(Homomorphic):\n \"\"\"\n Okamoto-Uchiyama algorithm is homomorphic with respect to the addition.\n Ref: https://sefiks.com/2023/10/20/a-step-by-step-partially-homomorphic-encryption-example-with-okamoto-uchiyama-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"private_key\"][\"p\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of OkamotoUchiyama cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # modulo\n n = p * p * q\n\n # generator\n g = random.randint(2, n)\n\n if pow(g, p - 1, p * p) == 1:\n raise ValueError(\"Fermat's Little Theorem must be satisfied\")\n\n h = pow(g, n, n)\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"h\"] = h\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Okamoto-Uchiyama requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return random.randint(1, n - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with OkamotoUchiyama\n Args:\n plaintext (int): message to encrypt\n random_key (int): OkamotoUchiyama requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n h = self.keys[\"public_key\"][\"h\"]\n r = random_key or self.generate_random_key()\n\n if plaintext > p:\n plaintext = 
plaintext % p\n logger.debug(\n f\"plaintext must be in scale [0, {p=}] but this is exceeded.\"\n \"New plaintext is {plaintext}\"\n )\n return (pow(g, plaintext, n) * pow(h, r, n)) % n\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Okamoto-Uchiyama\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n\n a = self.lx(pow(ciphertext, p - 1, p * p))\n b = self.lx(pow(g, p - 1, p * p))\n return (a * pow(b, -1, p)) % p\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with OkamotoUchiyama\n ciphertext2 (int): 2nd ciphertext created with OkamotoUchiyama\n Returns:\n ciphertext3 (int): 3rd ciphertext created with OkamotoUchiyama\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Okamoto-Uchiyama is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Okamoto-Uchiyama is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Okamoto-Uchiyama\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Okamoto-Uchiyama\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Okamoto-Uchiyama can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / p\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n if x % p != 1:\n raise ValueError(f\"Input passed to lx ({x}) must be identical to 1 in modulo {p}\")\n if math.gcd(x, p * p) != 1:\n raise ValueError(f\"gcd({x}, {p}^2) must be equal to 1\")\n y = (x - 1) // p\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "Benaloh", "path": "lightphe/cryptosystems/Benaloh.py", "snippet": "class Benaloh(Homomorphic):\n def __init__(self, keys: Optional[dict] = None, key_size: int = 50):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. 
default is less than other cryptosystems\n because decryption of Benaloh requires to solve DLP :/\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"r\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n x = 1\n while x == 1:\n # picking a prime p\n p = sympy.randprime(200, 2**key_size)\n\n # picking a prime q\n q = sympy.randprime(100, p)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n\n r = p - 1\n while gcd(q - 1, r) != 1:\n r = int(r / gcd(q - 1, r))\n\n if not (\n # r should divide p-1 without remainder\n (p - 1) % r == 0\n # r and (p - 1) / r must be coprimes\n and gcd(r, int((p - 1) / r)) == 1\n # r and q-1 must be coprimes\n and gcd(r, q - 1) == 1\n ):\n continue\n\n y = random.randint(2, n)\n if gcd(y, n) != 1:\n continue\n\n # to guarantee correct decryption\n prime_factors = sympy.factorint(r).keys()\n decryption_guaranteed = True\n for prime_factor in prime_factors:\n # none of r's prime factor should satisfy the condition\n if pow(y, int(phi / prime_factor), n) == 1:\n decryption_guaranteed = False\n\n if decryption_guaranteed is False:\n continue\n\n x = pow(y, int(phi / r), n)\n if x != 1:\n break\n\n keys[\"public_key\"][\"y\"] = y\n keys[\"public_key\"][\"r\"] = r\n keys[\"public_key\"][\"n\"] = n\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"private_key\"][\"x\"] = x\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Generate random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n u = random.randint(1, n)\n if gcd(u, n) == 1:\n break\n return u\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Benaloh\n Args:\n plaintext (int): message to encrypt\n random_key (int): Benaloh requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n y = self.keys[\"public_key\"][\"y\"]\n r = self.keys[\"public_key\"][\"r\"]\n n = self.keys[\"public_key\"][\"n\"]\n\n u = random_key or self.generate_random_key()\n\n if plaintext > r:\n plaintext = plaintext % r\n logger.debug(\n f\"Benaloh lets you to encrypt messages in [0, {r=}].\"\n f\"But your plaintext exceeds this limit.\"\n f\"New plaintext is {plaintext}\"\n )\n\n c = (pow(y, plaintext, n) * pow(u, r, n)) % n\n\n if gcd(c, n) != 1:\n logger.debug(\"ciphertext is not co-prime with n!\")\n\n return c\n\n def decrypt(self, ciphertext: int) -> int:\n \"\"\"\n Decrypt a given ciphertext with Benaloh\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n r = self.keys[\"public_key\"][\"r\"]\n phi = self.keys[\"private_key\"][\"phi\"]\n x = self.keys[\"private_key\"][\"x\"]\n\n a = pow(ciphertext, int(phi / r), n)\n\n md = 0\n while True:\n if pow(x, md, n) == a:\n break\n md = md + 1\n if md > r:\n raise ValueError(f\"Message cannot be restored in [{0}, {n}]\")\n return md\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic 
addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Benaloh\n ciphertext2 (int): 2nd ciphertext created with Benaloh\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Benaloh\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Benaloh is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Benaloh is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Benaloh\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Benaloh\n \"\"\"\n # raise ValueError(\"Benaloh is not supporting multiplying by a constant\")\n n = self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Benaloh can encrypt messages [1, {self.plaintext_modulo}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)" }, { "identifier": "NaccacheStern", "path": "lightphe/cryptosystems/NaccacheStern.py", "snippet": "class NaccacheStern(Homomorphic):\n \"\"\"\n Naccache-Stern algorithm is homomorphic with respect to the addition.\n It is a generaliation of Benaloh cryptosystem\n Ref: https://sefiks.com/2023/10/26/a-step-by-step-partially-homomorphic-encryption-example-with-naccache-stern-in-python/\n Original paper: https://dl.acm.org/doi/pdf/10.1145/288090.288106\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=37, deterministic: bool = False):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. Less than many cryptosystems because\n decryption requires to solve DLP.\n deterministic (boolean): deterministic or probabilistic version of\n cryptosystem\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"sigma\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n self.deterministic = deterministic\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Naccache-Stern cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # pick a family of small primes. 
the largest one is 10-bits\n # TODO: do something generic instead of constant primes\n prime_set = [3, 5, 7, 11, 13, 17]\n k = len(prime_set)\n\n if all(sympy.isprime(prime) is True for prime in prime_set) is False:\n raise ValueError(\"All items of prime set must be prime!\")\n\n # divide the set in half and find products of primes\n u = 1\n v = 1\n\n for i, prime in enumerate(prime_set):\n if i < len(prime_set) / 2:\n u = u * prime\n else:\n v = v * prime\n\n # product of all primes\n sigma = u * v\n\n # pick large prime numbers\n while True:\n a = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n b = sympy.randprime(100, a)\n\n # calculate two primes from chosen ones\n p = (2 * a * u) + 1\n q = (2 * b * v) + 1\n\n # recommended n is 768 bits\n n = p * q\n phi = (p - 1) * (q - 1)\n\n if phi % sigma != 0:\n logger.debug(\"canceled because phi cannot be divisible by sigma\")\n continue\n\n if math.gcd(sigma, int(phi // sigma)) != 1:\n logger.debug(\"canceled because sigma and phi/sigma are not coprime\")\n continue\n\n p_conditions = []\n for i in range(0, int(k / 2)):\n pi = prime_set[i]\n if (\n (p - 1) % pi == 0\n and math.gcd(pi, int((p - 1) / pi)) == 1\n and math.gcd(pi, q - 1) == 1\n ):\n p_conditions.append(1)\n else:\n p_conditions.append(0)\n p_satisfied = True if len(p_conditions) == sum(p_conditions) else False\n if p_satisfied is False:\n logger.debug(\"canceled because p_conditions are not satisfied\")\n continue\n\n q_conditions = []\n for i in range(int(k / 2), k):\n pi = prime_set[i]\n if (\n (q - 1) % pi == 0\n and math.gcd(pi, int((q - 1) / pi)) == 1\n and math.gcd(pi, p - 1)\n ):\n q_conditions.append(1)\n else:\n q_conditions.append(0)\n\n q_satisfied = True if len(q_conditions) == sum(q_conditions) else False\n if q_satisfied is False:\n logger.debug(\"canceled because q_conditions are not satisfied\")\n continue\n\n # p and q must be primes\n if not (sympy.isprime(p) and sympy.isprime(q)):\n continue\n\n # choose a generator g\n g = random.randint(2, n)\n # it must be co-prime to n\n if math.gcd(g, n) != 1:\n logger.debug(\"canceled becuase g is not co-prime with ne\")\n continue\n # guarantee it is not pi-th power.\n for pi in prime_set:\n logger.debug(\"canceled because g is a pi-th power\")\n if pow(g, int(phi / pi), n) == 1:\n continue\n\n # the order of g modulo n must be phi/4\n if pow(g, int(phi / 4), n) != 1:\n continue\n\n # check decryption is guaranteed similar to benaloh\n # ps: this is not mentioned in the original paper\n is_decryption_guaranteed = True\n for pi in prime_set:\n prime_factors = sympy.factorint(pi).keys()\n for prime_factor in prime_factors:\n if pow(g, int(phi / prime_factor), n) == 1:\n is_decryption_guaranteed = False\n if is_decryption_guaranteed is True:\n break\n\n logger.debug(f\"n bits is {len(bin(n)[2:])}\")\n\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n # sigma can optionally be secret in deterministic version\n keys[\"public_key\"][\"sigma\"] = sigma\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"private_key\"][\"prime_set\"] = prime_set\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Naccache-Stern requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return random.randint(1, n - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given 
plaintext for optionally given random key with Naccache-Stern\n Args:\n plaintext (int): message to encrypt\n random_key (int): Naccache-Stern requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n r = random_key or self.generate_random_key()\n sigma = self.keys[\"public_key\"][\"sigma\"]\n if plaintext > self.plaintext_modulo:\n plaintext = plaintext % self.plaintext_modulo\n logger.debug(\n f\"plaintext must be in scale [0, {self.plaintext_modulo}] \"\n \"but this is exceeded. New plaintext is {plaintext}\"\n )\n\n if self.deterministic is True:\n return pow(g, plaintext, n)\n\n # Probabilistic\n return (pow(r, sigma, n) * pow(g, plaintext, n)) % n\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Naccache-Stern\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n g = self.keys[\"public_key\"][\"g\"]\n prime_set = self.keys[\"private_key\"][\"prime_set\"]\n\n remainders = []\n for i, prime in enumerate(prime_set):\n ci = pow(ciphertext, int(phi / prime), n)\n logger.debug(f\"c_{i} = {ci}\")\n\n j = 0\n while True:\n if ci == pow(g, int((j * phi) / prime), n):\n logger.debug(f\"m_{i} = {j}\")\n remainders.append(j)\n break\n j = j + 1\n if j > prime**2:\n raise ValueError(\n f\"c_{i} cannot be restored from {ci} = {g}^(j*{phi}/{prime}) mod {n}\"\n )\n\n congruences = []\n for i in range(0, len(prime_set)):\n logger.debug(f\"m mod {prime_set[i]} = {remainders[i]}\")\n congruences.append((remainders[i], prime_set[i]))\n\n # chinese remainder problem\n ms = solve_congruence(*congruences)\n if not ms:\n raise ValueError(\"message cannot be restored with Chinese Remainder!\")\n return ms[0]\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Naccache-Stern\n ciphertext2 (int): 2nd ciphertext created with Naccache-Stern\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Naccache-Stern\n \"\"\"\n return (ciphertext1 * ciphertext2) % self.ciphertext_modulo\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Naccache-Stern is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Naccache-Stern is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Naccache-Stern\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Naccache-Stern\n \"\"\"\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Naccache-Stern can encrypt messages [1, {self.plaintext_modulo}]. \"\n f\"Seems constant exceeded this limit. 
New constant is {constant}\"\n )\n\n return pow(ciphertext, constant, self.ciphertext_modulo)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n if self.deterministic is True:\n raise ValueError(\n \"Deterministic version of Naccache-Stern does not support reencryption.\"\n \"If you still want to perform ciphertext regeneration, then you may \"\n \"consider to use its probabilistic version.\"\n )\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)" }, { "identifier": "GoldwasserMicali", "path": "lightphe/cryptosystems/GoldwasserMicali.py", "snippet": "class GoldwasserMicali(Homomorphic):\n \"\"\"\n Goldwasser-Micali algorithm is homomorphic with respect to the Exclusively OR (XOR).\n Ref: https://sefiks.com/2023/10/27/a-step-by-step-partially-homomorphic-encryption-example-with-goldwasser-micali-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=100):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n # TODO: not sure about the plaintext modulo\n self.plaintext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Goldwasser-Micali cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n\n # find non-residue x\n while True:\n x = random.randint(1, n - 1)\n if math.gcd(x, n) == 1 and jacobi_symbol(x, p) == -1 and jacobi_symbol(x, q) == -1:\n break\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"x\"] = x\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Goldwasser-Micali requires to generate one-time random key that co-prime to n\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(1, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> list:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Goldwasser-Micali\n Args:\n plaintext (int): message to encrypt\n random_key (int): Goldwasser-Micali requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n x = self.keys[\"public_key\"][\"x\"]\n\n m_binary = bin(plaintext)[2:]\n\n # number of bits\n k = len(m_binary)\n\n if random_key and len(random_key) != k:\n raise ValueError(f\"Random key must be length of {k}\")\n\n c = []\n for i in range(0, k):\n mi = int(m_binary[i])\n\n if random_key:\n ri = random_key[i]\n else:\n ri = 
self.generate_random_key()\n\n ci = (pow(ri, 2, n) * pow(x, mi, n)) % n\n c.append(ci)\n\n return c\n\n def decrypt(self, ciphertext: list) -> int:\n \"\"\"\n Decrypt a given ciphertext with Goldwasser-Micali\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n m_binaries = []\n\n p = self.keys[\"private_key\"][\"p\"]\n q = self.keys[\"private_key\"][\"q\"]\n\n for i in ciphertext:\n xp = i % p\n xq = i % q\n\n if pow(xp, int((p - 1) / 2), p) == 1 and pow(xq, int((q - 1) / 2), q) == 1:\n m_binaries.append(\"0\")\n else:\n m_binaries.append(\"1\")\n\n m_binary = \"\".join(m_binaries)\n return int(m_binary, 2)\n\n def add(self, ciphertext1: list, ciphertext2: list) -> list:\n raise ValueError(\"Goldwasser-Micali is not homomorphic with respect to the addition\")\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Goldwasser-Micali is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> list:\n \"\"\"\n Perform homomorphic xor on encrypted data.\n Result of this must be equal to E(m1 ^ m2) = E(m1) ^ E(m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Goldwasser-Micali\n ciphertext2 (int): 2nd ciphertext created with Goldwasser-Micali\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Goldwasser-Micali\n \"\"\"\n ciphertext3 = []\n for i in range(0, len(ciphertext1)):\n c1 = ciphertext1[i]\n c2 = ciphertext2[i]\n ciphertext3.append((c1 * c2) % self.ciphertext_modulo)\n\n return ciphertext3\n\n def multiply_by_contant(self, ciphertext: int, constant: int):\n raise ValueError(\"Goldwasser-Micali does not support multiplying with constant\")\n\n def reencrypt(self, ciphertext: int):\n raise ValueError(\"Goldwasser-Micali does not support re-encryption\")" }, { "identifier": "EllipticCurveElGamal", "path": "lightphe/cryptosystems/EllipticCurveElGamal.py", "snippet": "class EllipticCurveElGamal(Homomorphic):\n \"\"\"\n Elliptic Curve ElGamal algorithm is an additively homomorphic algorithm\n Unluckily, it requires to solve (EC)DLP to restore plaintext in decryption\n However it is easy to restore plaintext while plaintext is not very large\n unsimilar to Benaloh or Naccache-Stern\n Ref: https://sefiks.com/2018/08/21/elliptic-curve-elgamal-encryption/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size: int = 160):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. default is 160.\n this is equivalent to 1024 bit RSA.\n \"\"\"\n # TODO: add different forms and curves. e.g. 
Koblitz, Edwards (Ed25519)\n self.curve = Weierstrass()\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.curve.p\n self.ciphertext_modulo = self.curve.p\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of Elliptic Curve ElGamal cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # private key\n ka = random.getrandbits(key_size)\n\n # public key\n Qa = self.curve.apply_double_and_add_method(G=self.curve.G, k=ka, p=self.curve.p)\n\n keys[\"public_key\"][\"Qa\"] = Qa\n keys[\"private_key\"][\"ka\"] = ka\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Elliptic Curve ElGamal requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n return random.getrandbits(128)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> tuple:\n \"\"\"\n Encrypt plaintext with Elliptic Curve ElGamal\n Args:\n plaintext (int): message to encrypt\n random_key (int): random key for encryption. Do not set this to a static value.\n Returns\n ciphertext (tuple): c1 and c2\n \"\"\"\n # modulo\n p = self.curve.p\n\n # base point\n G = self.curve.G\n\n # public key\n Qa = self.keys[\"public_key\"][\"Qa\"]\n\n # random key\n r = random_key or self.generate_random_key()\n\n s = self.curve.apply_double_and_add_method(G=G, k=plaintext, p=p)\n\n c1 = self.curve.apply_double_and_add_method(G=G, k=r, p=p)\n\n c2 = self.curve.apply_double_and_add_method(G=Qa, k=r, p=p)\n c2 = self.curve.add_points(c2, s, p)\n\n return c1, c2\n\n def decrypt(self, ciphertext: tuple) -> int:\n \"\"\"\n Decrypt ciphertext with Elliptic Curve ElGamal\n Args:\n ciphertext (tuple): c1 and c2\n Returns:\n plaintext (int): restored message\n \"\"\"\n # modulo\n p = self.curve.p\n\n # private key\n ka = self.keys[\"private_key\"][\"ka\"]\n\n c1, c2 = ciphertext\n c1_prime = (c1[0], (-1 * c1[1]) % p)\n s_prime = self.curve.apply_double_and_add_method(G=c1_prime, k=ka, p=p)\n s_prime = self.curve.add_points(P=c2, Q=s_prime, p=p)\n\n # s_prime is a point on the elliptic curve\n # s_prime = k x G\n # we need to find k from known s_prime and G\n # this requires to solve ECDLP\n\n # base point\n G = self.curve.G\n k = 2\n while True:\n G = self.curve.add_points(P=G, Q=self.curve.G, p=p)\n if G[0] == s_prime[0] and G[1] == s_prime[1]:\n return k\n k = k + 1\n if k > self.curve.n:\n raise ValueError(f\"Cannot restore scalar from {s_prime} = k x {self.curve.G}\")\n\n def multiply(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n raise ValueError(\n \"Elliptic Curve ElGamal is not homomorphic with respect to the multiplication\"\n )\n\n def add(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic addition on encrypted data\n Result of this must be equal to E(m1 + m2)\n Args:\n ciphertext1 (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n a = self.curve.add_points(P=ciphertext1[0], Q=ciphertext2[0], p=self.curve.p)\n b = self.curve.add_points(P=ciphertext1[1], Q=ciphertext2[1], p=self.curve.p)\n return a, b\n\n def xor(self, ciphertext1: tuple, ciphertext2: tuple) -> int:\n raise 
ValueError(\n \"Elliptic Curve ElGamal is not homomorphic with respect to the exclusive or\"\n )\n\n def multiply_by_contant(self, ciphertext: tuple, constant: int) -> tuple:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to k x E(m1) = E(m1 * k)\n where E(m1) = ciphertext\n Args:\n ciphertext (int): ciphertext created with Elliptic Curve ElGamal\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Elliptic Curve ElGamal\n \"\"\"\n return self.curve.apply_double_and_add_method(\n G=ciphertext[0], k=constant, p=self.curve.p\n ), self.curve.apply_double_and_add_method(G=ciphertext[1], k=constant, p=self.curve.p)\n\n def reencrypt(self, ciphertext: tuple) -> tuple:\n raise ValueError(\"Elliptic Curve ElGamal does not support regeneration of ciphertext\")" }, { "identifier": "phe_utils", "path": "lightphe/commons/phe_utils.py", "snippet": "def parse_int(value: Union[int, float], modulo: int) -> int:\ndef fractionize(value: float, modulo: int, precision: Optional[int] = None) -> Tuple[int, int]:\ndef solve_dlp():" }, { "identifier": "Logger", "path": "lightphe/commons/logger.py", "snippet": "class Logger:\n def __init__(self, module):\n self.module = module\n log_level = os.environ.get(\"LIGHTPHE_LOG_LEVEL\", str(logging.INFO))\n try:\n self.log_level = int(log_level)\n except Exception as err:\n self.dump_log(\n f\"Exception while parsing $LIGHTPHE_LOG_LEVEL.\"\n f\"Expected int but it is {log_level} ({str(err)})\"\n )\n self.log_level = logging.INFO\n\n def info(self, message):\n if self.log_level <= logging.INFO:\n self.dump_log(message)\n\n def debug(self, message):\n if self.log_level <= logging.DEBUG:\n self.dump_log(f\"🕷️ {message}\")\n\n def warn(self, message):\n if self.log_level <= logging.WARNING:\n self.dump_log(f\"⚠️ {message}\")\n\n def error(self, message):\n if self.log_level <= logging.ERROR:\n self.dump_log(f\"🔴 {message}\")\n\n def critical(self, message):\n if self.log_level <= logging.CRITICAL:\n self.dump_log(f\"💥 {message}\")\n\n def dump_log(self, message):\n print(f\"{str(datetime.now())[2:-7]} - {message}\")" } ]
from typing import Union

from lightphe.models.Homomorphic import Homomorphic
from lightphe.models.Algorithm import Algorithm
from lightphe.cryptosystems.RSA import RSA
from lightphe.cryptosystems.ElGamal import ElGamal
from lightphe.cryptosystems.Paillier import Paillier
from lightphe.cryptosystems.DamgardJurik import DamgardJurik
from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama
from lightphe.cryptosystems.Benaloh import Benaloh
from lightphe.cryptosystems.NaccacheStern import NaccacheStern
from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali
from lightphe.commons import phe_utils
from lightphe.commons.logger import Logger
17,545
logger = Logger(module="lightphe/models/Ciphertext.py")

# pylint: disable=too-few-public-methods, no-else-return
class Ciphertext:
    def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]):
        self.algorithm_name = algorithm_name
        self.keys = keys
        self.value = value
        if algorithm_name == Algorithm.RSA:
            cs = RSA(keys=keys)
        elif algorithm_name == Algorithm.ElGamal:
            cs = ElGamal(keys=keys)
        elif algorithm_name == Algorithm.ExponentialElGamal:
            cs = ElGamal(keys=keys, exponential=True)
        elif algorithm_name == Algorithm.EllipticCurveElGamal:
            cs = EllipticCurveElGamal(keys=keys)
        elif algorithm_name == Algorithm.Paillier:
            cs = Paillier(keys=keys)
        elif algorithm_name == Algorithm.DamgardJurik:
            cs = DamgardJurik(keys=keys)
logger = Logger(module="lightphe/models/Ciphertext.py")

# pylint: disable=too-few-public-methods, no-else-return
class Ciphertext:
    def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]):
        self.algorithm_name = algorithm_name
        self.keys = keys
        self.value = value
        if algorithm_name == Algorithm.RSA:
            cs = RSA(keys=keys)
        elif algorithm_name == Algorithm.ElGamal:
            cs = ElGamal(keys=keys)
        elif algorithm_name == Algorithm.ExponentialElGamal:
            cs = ElGamal(keys=keys, exponential=True)
        elif algorithm_name == Algorithm.EllipticCurveElGamal:
            cs = EllipticCurveElGamal(keys=keys)
        elif algorithm_name == Algorithm.Paillier:
            cs = Paillier(keys=keys)
        elif algorithm_name == Algorithm.DamgardJurik:
            cs = DamgardJurik(keys=keys)
elif algorithm_name == Algorithm.OkamotoUchiyama:
6
2023-10-28 14:57:59+00:00
24k
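A minimal usage sketch (not part of the dataset record) of the additive homomorphism described in this record's context snippets, where multiplying two ciphertexts modulo n decrypts to the sum of the plaintexts. It uses the OkamotoUchiyama class quoted above and assumes the lightphe package layout from the record's import statement:

```py
from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama

# Fresh key pair; plaintext_modulo is the private prime p per the quoted snippet.
cs = OkamotoUchiyama(key_size=1024)

c1 = cs.encrypt(plaintext=17)
c2 = cs.encrypt(plaintext=25)

# add() computes (c1 * c2) % n, which decrypts to (17 + 25) % p.
c3 = cs.add(ciphertext1=c1, ciphertext2=c2)
assert cs.decrypt(c3) == (17 + 25) % cs.plaintext_modulo
```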
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/mapping.py
[ { "identifier": "PeftModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModel(PushToHubMixin, torch.nn.Module):\n \"\"\"\n Base model encompassing various Peft methods.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft.\n peft_config ([`PeftConfig`]): The configuration of the Peft model.\n\n\n **Attributes**:\n - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft.\n - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model.\n - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when\n saving the model.\n - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if\n using [`PromptLearningConfig`].\n - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if\n using [`PromptLearningConfig`].\n - **transformer_backbone_name** (`str`) -- The name of the transformer\n backbone in the base model if using [`PromptLearningConfig`].\n - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone\n in the base model if using [`PromptLearningConfig`].\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__()\n self.base_model = model\n self.config = self.base_model.config\n self.modules_to_save = None\n self.peft_config = {}\n self.active_adapter = adapter_name\n self.peft_type = peft_config.peft_type\n self.base_model_torch_dtype = getattr(model, \"dtype\", None)\n if not isinstance(peft_config, PromptLearningConfig):\n self.peft_config[adapter_name] = peft_config\n self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type](\n self.base_model, self.peft_config, adapter_name\n )\n else:\n self.add_adapter(adapter_name, peft_config)\n\n def save_pretrained(self, save_directory, **kwargs):\n r\"\"\"\n This function saves the adapter model and the adapter configuration files to a directory, so that it can be\n reloaded using the [`LoraModel.from_pretrained`] class method, and also used by the [`LoraModel.push_to_hub`]\n method.\n\n Args:\n save_directory (`str`):\n Directory where the adapter model and configuration files will be saved (will be created if it does not\n exist).\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the `push_to_hub` method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise ValueError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n os.makedirs(save_directory, exist_ok=True)\n\n for adapter_name, peft_config in self.peft_config.items():\n # save only the trainable weights\n output_state_dict = get_peft_model_state_dict(\n self, state_dict=kwargs.get(\"state_dict\", None), adapter_name=adapter_name\n )\n output_dir = os.path.join(save_directory, adapter_name) if adapter_name != \"default\" else save_directory\n os.makedirs(output_dir, exist_ok=True)\n torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n\n # save the config and change the inference mode to `True`\n if peft_config.base_model_name_or_path is None:\n peft_config.base_model_name_or_path = (\n self.base_model.__dict__.get(\"name_or_path\", None)\n if isinstance(peft_config, PromptLearningConfig)\n else self.base_model.model.__dict__.get(\"name_or_path\", None)\n )\n inference_mode = peft_config.inference_mode\n peft_config.inference_mode = True\n peft_config.save_pretrained(output_dir)\n 
peft_config.inference_mode = inference_mode\n\n @classmethod\n def from_pretrained(cls, model, model_id, adapter_name=\"default\", is_trainable=False, **kwargs):\n r\"\"\"\n Instantiate a [`LoraModel`] from a pretrained Lora configuration and weights.\n\n Args:\n model ([`~transformers.PreTrainedModel`]):\n The model to be adapted. The model should be initialized with the\n [`~transformers.PreTrainedModel.from_pretrained`] method from the 🤗 Transformers library.\n model_id (`str` or `os.PathLike`):\n The name of the Lora configuration to use. Can be either:\n - A string, the `model id` of a Lora configuration hosted inside a model repo on the Hugging Face\n Hub.\n - A path to a directory containing a Lora configuration file saved using the `save_pretrained`\n method (`./my_lora_config_directory/`).\n \"\"\"\n from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING\n\n # load the config\n config = PEFT_TYPE_TO_CONFIG_MAPPING[\n PeftConfig.from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None)).peft_type\n ].from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None))\n print(\"Config: \", config)\n\n if (getattr(model, \"hf_device_map\", None) is not None) and len(\n set(model.hf_device_map.values()).intersection({\"cpu\", \"disk\"})\n ) > 0:\n remove_hook_from_submodules(model)\n\n if isinstance(config, PromptLearningConfig) and is_trainable:\n raise ValueError(\"Cannot set a prompt learning adapter to trainable when loading pretrained adapter.\")\n else:\n config.inference_mode = not is_trainable\n\n if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys():\n model = cls(model, config, adapter_name)\n else:\n model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name)\n model.load_adapter(model_id, adapter_name, **kwargs)\n return model\n\n def _setup_prompt_encoder(self, adapter_name):\n config = self.peft_config[adapter_name]\n self.prompt_encoder = torch.nn.ModuleDict({})\n self.prompt_tokens = {}\n transformer_backbone = None\n for name, module in self.base_model.named_children():\n for param in module.parameters():\n param.requires_grad = False\n if isinstance(module, PreTrainedModel):\n # Make sure to freeze Tranformers model\n if transformer_backbone is None:\n transformer_backbone = module\n self.transformer_backbone_name = name\n\n if config.num_transformer_submodules is None:\n config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1\n\n for named_param, value in list(transformer_backbone.named_parameters()):\n if value.shape[0] == self.base_model.config.vocab_size:\n self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(\".weight\", \"\"))\n break\n\n if config.peft_type == PeftType.PROMPT_TUNING:\n prompt_encoder = PromptEmbedding(config, self.word_embeddings)\n elif config.peft_type == PeftType.P_TUNING:\n prompt_encoder = PromptEncoder(config)\n elif config.peft_type == PeftType.PREFIX_TUNING:\n prompt_encoder = PrefixEncoder(config)\n else:\n raise ValueError(\"Not supported\")\n self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder}))\n self.prompt_tokens[adapter_name] = torch.arange(\n config.num_virtual_tokens * config.num_transformer_submodules\n ).long()\n\n def get_prompt_embedding_to_save(self, adapter_name):\n \"\"\"\n Returns the prompt embedding to save when saving the model. 
Only applicable when `peft_config.peft_type !=\n PeftType.LORA`.\n \"\"\"\n prompt_tokens = self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(self.device)\n if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING:\n prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens]\n prompt_embeddings = self.prompt_encoder[adapter_name](prompt_tokens)\n return prompt_embeddings[0].detach().cpu()\n\n def get_prompt(self, batch_size):\n \"\"\"\n Returns the virtual prompts to use for Peft. Only applicable when `peft_config.peft_type != PeftType.LORA`.\n \"\"\"\n peft_config = self.active_peft_config\n prompt_encoder = self.prompt_encoder[self.active_adapter]\n prompt_tokens = self.prompt_tokens[self.active_adapter].unsqueeze(0).expand(batch_size, -1).to(self.device)\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens]\n if peft_config.inference_mode:\n past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)\n else:\n past_key_values = prompt_encoder(prompt_tokens)\n past_key_values = past_key_values.view(\n batch_size,\n peft_config.num_virtual_tokens,\n peft_config.num_layers * 2,\n peft_config.num_attention_heads,\n peft_config.token_dim // peft_config.num_attention_heads,\n )\n if peft_config.num_transformer_submodules == 2:\n past_key_values = torch.cat([past_key_values, past_key_values], dim=2)\n past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(\n peft_config.num_transformer_submodules * 2\n )\n if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None:\n post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]\n past_key_values = post_process_fn(past_key_values)\n return past_key_values\n else:\n if peft_config.inference_mode:\n prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)\n else:\n prompts = prompt_encoder(prompt_tokens)\n return prompts\n\n def print_trainable_parameters(self):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in self.named_parameters():\n num_params = param.numel()\n # if using DS Zero 3 and the weights are initialized empty\n if num_params == 0 and hasattr(param, \"ds_numel\"):\n num_params = param.ds_numel\n\n all_param += num_params\n if param.requires_grad:\n trainable_params += num_params\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.base_model, name)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Forward pass of the model.\n \"\"\"\n return self.get_base_model()(*args, **kwargs)\n\n @contextmanager\n def disable_adapter(self):\n \"\"\"\n Disables the adapter module.\n \"\"\"\n if isinstance(self.active_peft_config, PromptLearningConfig):\n old_forward = self.forward\n self.forward = self.base_model.forward\n else:\n self.base_model.disable_adapter_layers()\n yield\n if isinstance(self.active_peft_config, PromptLearningConfig):\n self.forward = old_forward\n else:\n self.base_model.enable_adapter_layers()\n\n def get_base_model(self):\n \"\"\"\n Returns the base model.\n \"\"\"\n return self.base_model if 
isinstance(self.active_peft_config, PromptLearningConfig) else self.base_model.model\n\n def add_adapter(self, adapter_name, peft_config):\n if peft_config.peft_type != self.peft_type:\n raise ValueError(\n f\"Cannot combine adapters with different peft types. \"\n f\"Found {self.peft_type} and {peft_config.peft_type}.\"\n )\n self.peft_config[adapter_name] = peft_config\n if isinstance(peft_config, PromptLearningConfig):\n self._setup_prompt_encoder(adapter_name)\n else:\n self.base_model.add_adapter(adapter_name, peft_config)\n if getattr(peft_config, \"modules_to_save\", None) is not None:\n if self.modules_to_save is None:\n self.modules_to_save = set(peft_config.modules_to_save)\n else:\n self.modules_to_save = self.modules_to_save.update(peft_config.modules_to_save)\n _set_trainable(self, adapter_name)\n\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):\n from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING\n\n if adapter_name not in self.peft_config:\n # load the config\n peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[\n PeftConfig.from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None)).peft_type\n ].from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None))\n if isinstance(peft_config, PromptLearningConfig) and is_trainable:\n raise ValueError(\"Cannot set a prompt learning adapter to trainable when loading pretrained adapter.\")\n else:\n peft_config.inference_mode = not is_trainable\n self.add_adapter(adapter_name, peft_config)\n\n # load weights if any\n path = os.path.join(model_id, kwargs[\"subfolder\"]) if kwargs.get(\"subfolder\", None) is not None else model_id\n print(\"Load from adapter:\", WEIGHTS_NAME)\n if os.path.exists(os.path.join(path, WEIGHTS_NAME)):\n filename = os.path.join(path, WEIGHTS_NAME)\n else:\n try:\n filename = hf_hub_download(model_id, WEIGHTS_NAME, subfolder=kwargs.get(\"subfolder\", None))\n except: # noqa\n raise ValueError(\n f\"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. 
\"\n f\"Please check that the file {WEIGHTS_NAME} is present at {model_id}.\"\n )\n\n adapters_weights = torch.load(\n filename, map_location=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n )\n # load the weights into the model\n set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name)\n if (\n (getattr(self, \"hf_device_map\", None) is not None)\n and (len(set(self.hf_device_map.values()).intersection({\"cpu\", \"disk\"})) > 0)\n and len(self.peft_config) == 1\n ):\n device_map = kwargs.get(\"device_map\", \"auto\")\n max_memory = kwargs.get(\"max_memory\", None)\n offload_dir = kwargs.get(\"offload_folder\", None)\n offload_index = kwargs.get(\"offload_index\", None)\n\n dispatch_model_kwargs = {}\n # Safety checker for previous `accelerate` versions\n # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/\n if \"offload_index\" in inspect.signature(dispatch_model).parameters:\n dispatch_model_kwargs[\"offload_index\"] = offload_index\n\n no_split_module_classes = self._no_split_modules\n\n if device_map != \"sequential\":\n max_memory = get_balanced_memory(\n self,\n max_memory=max_memory,\n no_split_module_classes=no_split_module_classes,\n low_zero=(device_map == \"balanced_low_0\"),\n )\n if isinstance(device_map, str):\n device_map = infer_auto_device_map(\n self, max_memory=max_memory, no_split_module_classes=no_split_module_classes\n )\n dispatch_model(\n self,\n device_map=device_map,\n offload_dir=offload_dir,\n **dispatch_model_kwargs,\n )\n hook = AlignDevicesHook(io_same_device=True)\n if isinstance(self.peft_config[adapter_name], PromptLearningConfig):\n remove_hook_from_submodules(self.prompt_encoder)\n add_hook_to_module(self.get_base_model(), hook)\n\n # Set model in evaluation mode to deactivate Dropout modules by default\n self.eval()\n\n def set_adapter(self, adapter_name):\n \"\"\"\n Sets the active adapter.\n \"\"\"\n if adapter_name not in self.peft_config:\n raise ValueError(f\"Adapter {adapter_name} not found.\")\n self.active_adapter = adapter_name\n if not isinstance(self.peft_config[adapter_name], PromptLearningConfig):\n self.base_model.set_adapter(adapter_name)\n _set_adapter(self, adapter_name)\n\n @property\n def active_peft_config(self):\n return self.peft_config[self.active_adapter]" }, { "identifier": "PeftModelForCausalLM", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForCausalLM(PeftModel):\n \"\"\"\n Peft model for causal language modeling.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForCausalLM\n >>> from peft import PeftModelForCausalLM, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"CAUSAL_LM\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 1280,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 20,\n ... \"num_layers\": 36,\n ... \"encoder_hidden_size\": 1280,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... 
}\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2-large\")\n >>> peft_model = PeftModelForCausalLM(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids\")\n kwargs[\"token_type_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size)\n return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs)\n else:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n # concat prompt labels\n if labels is not None:\n prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(self.device)\n kwargs[\"labels\"] = torch.cat((prefix_labels, labels), dim=1)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def generate(self, **kwargs):\n peft_config = self.active_peft_config\n self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation\n try:\n if not isinstance(peft_config, PromptLearningConfig):\n outputs = self.base_model.generate(**kwargs)\n else:\n if \"input_ids\" not in kwargs:\n raise ValueError(\"input_ids must be provided for Peft model generation\")\n # For gpt2 models, we construct postion_ids on the fly by using attention mask, and position ids need to match input_shape.\n # for prefix tuning, input shape is determined using `input_ids`. Thus we should not expand 'attention_mask' here\n # for prompt tuning input_ids is not passed but a concatenated input_embeds is passed. 
Thus attention_mask needs to be of same size of num_virtual_tokens + input_ids\n if kwargs.get(\"attention_mask\", None) is not None and peft_config.peft_type in [\n PeftType.PROMPT_TUNING,\n PeftType.P_TUNING,\n ]:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(\n kwargs[\"input_ids\"].shape[0], peft_config.num_virtual_tokens\n ).to(kwargs[\"input_ids\"].device)\n kwargs[\"attention_mask\"] = torch.cat((prefix_attention_mask, kwargs[\"attention_mask\"]), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\n \"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\"\n )\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\n \"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids\"\n )\n kwargs[\"token_type_ids\"] = None\n\n outputs = self.base_model.generate(**kwargs)\n except:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n raise\n else:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n return outputs\n\n def prepare_inputs_for_generation(self, *args, **kwargs):\n peft_config = self.active_peft_config\n model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)\n if isinstance(peft_config, PromptLearningConfig):\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n prefix_attention_mask = torch.ones(\n model_kwargs[\"input_ids\"].shape[0], peft_config.num_virtual_tokens\n ).to(model_kwargs[\"input_ids\"].device)\n model_kwargs[\"attention_mask\"] = torch.cat(\n (prefix_attention_mask, model_kwargs[\"attention_mask\"]), dim=1\n )\n\n if model_kwargs[\"past_key_values\"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n\n if self.base_model_torch_dtype is not None:\n # handle the case for Bloom where it outputs tuple of tuples\n if isinstance(past_key_values[0], tuple):\n past_key_values = tuple(\n tuple(\n past_key_value.to(self.base_model_torch_dtype)\n for past_key_value in past_key_value_tuple\n )\n for past_key_value_tuple in past_key_values\n )\n else:\n past_key_values = tuple(\n past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_values\n )\n\n model_kwargs[\"past_key_values\"] = past_key_values\n else:\n if model_kwargs[\"past_key_values\"] is None:\n inputs_embeds = self.word_embeddings(model_kwargs[\"input_ids\"])\n prompts = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n prompts = prompts.to(inputs_embeds.dtype)\n model_kwargs[\"inputs_embeds\"] = torch.cat((prompts, inputs_embeds), dim=1)\n model_kwargs[\"input_ids\"] = None\n\n return model_kwargs" }, { "identifier": "PeftModelForSeq2SeqLM", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForSeq2SeqLM(PeftModel):\n \"\"\"\n Peft model for sequence-to-sequence language modeling.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM\n >>> from peft import PeftModelForSeq2SeqLM, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"LORA\",\n ... \"task_type\": \"SEQ_2_SEQ_LM\",\n ... \"inference_mode\": False,\n ... \"r\": 8,\n ... \"target_modules\": [\"q\", \"v\"],\n ... \"lora_alpha\": 32,\n ... 
\"lora_dropout\": 0.1,\n ... \"merge_weights\": False,\n ... \"fan_in_fan_out\": False,\n ... \"enable_lora\": None,\n ... \"bias\": \"none\",\n ... }\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation\n self.base_model_prepare_encoder_decoder_kwargs_for_generation = (\n self.base_model._prepare_encoder_decoder_kwargs_for_generation\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n decoder_inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n decoder_inputs_embeds=decoder_inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if decoder_attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\")\n kwargs[\"token_type_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"decoder_attention_mask\": decoder_attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size)\n return self.base_model(\n input_ids=input_ids, decoder_input_ids=decoder_input_ids, past_key_values=past_key_values, **kwargs\n )\n else:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n if decoder_inputs_embeds is None and decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(\n labels, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n decoder_inputs_embeds = self.word_embeddings(decoder_input_ids)\n\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n kwargs[\"attention_mask\"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n # concat prompt labels\n if labels is not None:\n if peft_config.num_transformer_submodules == 1:\n kwargs[\"labels\"] = labels\n elif peft_config.num_transformer_submodules == 2:\n prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(self.device)\n kwargs[\"labels\"] = torch.cat((prefix_labels, labels), dim=1)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)\n if peft_config.num_transformer_submodules == 1:\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n elif peft_config.num_transformer_submodules == 2:\n decoder_inputs_embeds = torch.cat(\n (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1\n )\n return self.base_model(\n inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs\n )\n\n def generate(self, **kwargs):\n peft_config = self.active_peft_config\n self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation\n self.base_model._prepare_encoder_decoder_kwargs_for_generation = (\n self._prepare_encoder_decoder_kwargs_for_generation\n )\n try:\n if not isinstance(peft_config, PromptLearningConfig):\n outputs = self.base_model.generate(**kwargs)\n else:\n if \"input_ids\" not in kwargs:\n raise ValueError(\"input_ids must be provided for Peft model generation\")\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\n \"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\"\n )\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\n \"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\"\n )\n kwargs[\"token_type_ids\"] = None\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n outputs = self.base_model.generate(**kwargs)\n else:\n raise NotImplementedError\n except:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n self.base_model._prepare_encoder_decoder_kwargs_for_generation = (\n self.base_model_prepare_encoder_decoder_kwargs_for_generation\n )\n raise\n else:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n self.base_model._prepare_encoder_decoder_kwargs_for_generation = (\n self.base_model_prepare_encoder_decoder_kwargs_for_generation\n )\n return outputs\n\n def prepare_inputs_for_generation(self, *args, **kwargs):\n peft_config = self.active_peft_config\n model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)\n if model_kwargs[\"past_key_values\"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:\n batch_size = model_kwargs[\"decoder_input_ids\"].shape[0]\n past_key_values = self.get_prompt(batch_size)\n if self.base_model_torch_dtype is not None:\n # handle the case for Bloom where it outputs tuple of tuples\n if isinstance(past_key_values[0], tuple):\n past_key_values = tuple(\n tuple(\n past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_value_tuple\n )\n for past_key_value_tuple in past_key_values\n )\n else:\n past_key_values = tuple(\n past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_values\n )\n model_kwargs[\"past_key_values\"] = past_key_values\n\n return model_kwargs" }, { "identifier": "PeftModelForSequenceClassification", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForSequenceClassification(PeftModel):\n \"\"\"\n Peft model for sequence classification tasks.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n **Attributes**:\n - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.\n - **cls_layer_name** (`str`) -- The name of the classification layer.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSequenceClassification\n >>> from peft import PeftModelForSequenceClassification, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"SEQ_CLS\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 768,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 12,\n ... \"num_layers\": 12,\n ... \"encoder_hidden_size\": 768,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... 
}\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\")\n >>> peft_model = PeftModelForSequenceClassification(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n if self.modules_to_save is None:\n self.modules_to_save = {\"classifier\", \"score\"}\n else:\n self.modules_to_save.update({\"classifier\", \"score\"})\n\n for name, _ in self.base_model.named_children():\n if any(module_name in name for module_name in self.modules_to_save):\n self.cls_layer_name = name\n break\n\n # to make sure classifier layer is trainable\n _set_trainable(self, adapter_name)\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n peft_config = self.active_peft_config\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)\n else:\n if kwargs.get(\"token_type_ids\", None) is not None:\n kwargs[\"token_type_ids\"] = torch.cat(\n (\n torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.device),\n kwargs[\"token_type_ids\"],\n ),\n dim=1,\n ).long()\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def _prefix_tuning_forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n batch_size = input_ids.shape[0]\n past_key_values = self.get_prompt(batch_size)\n fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())\n kwargs.update(\n {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"inputs_embeds\": inputs_embeds,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n \"past_key_values\": past_key_values,\n }\n )\n if \"past_key_values\" in fwd_params:\n return self.base_model(labels=labels, **kwargs)\n else:\n transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)\n fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())\n if \"past_key_values\" not in fwd_params:\n raise ValueError(\"Model does not support past key values which are required for prefix tuning.\")\n outputs = transformer_backbone_name(**kwargs)\n pooled_output = outputs[1] if len(outputs) > 1 else outputs[0]\n if \"dropout\" in [name for name, _ in list(self.base_model.named_children())]:\n pooled_output = self.base_model.dropout(pooled_output)\n logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.base_model.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.base_model.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )" }, { 
"identifier": "PeftModelForTokenClassification", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForTokenClassification(PeftModel):\n \"\"\"\n Peft model for token classification tasks.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n **Attributes**:\n - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.\n - **cls_layer_name** (`str`) -- The name of the classification layer.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSequenceClassification\n >>> from peft import PeftModelForTokenClassification, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"TOKEN_CLS\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 768,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 12,\n ... \"num_layers\": 12,\n ... \"encoder_hidden_size\": 768,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... }\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForTokenClassification.from_pretrained(\"bert-base-cased\")\n >>> peft_model = PeftModelForTokenClassification(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig = None, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n if self.modules_to_save is None:\n self.modules_to_save = {\"classifier\", \"score\"}\n else:\n self.modules_to_save.update({\"classifier\", \"score\"})\n\n for name, _ in self.base_model.named_children():\n if any(module_name in name for module_name in self.modules_to_save):\n self.cls_layer_name = name\n break\n\n # to make sure classifier layer is trainable\n _set_trainable(self, adapter_name)\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)\n else:\n if kwargs.get(\"token_type_ids\", None) is not None:\n kwargs[\"token_type_ids\"] = torch.cat(\n (\n torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.device),\n kwargs[\"token_type_ids\"],\n ),\n dim=1,\n ).long()\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def _prefix_tuning_forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n batch_size = input_ids.shape[0]\n past_key_values = self.get_prompt(batch_size)\n fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())\n kwargs.update(\n {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"inputs_embeds\": inputs_embeds,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n \"past_key_values\": past_key_values,\n }\n )\n if \"past_key_values\" in fwd_params:\n return self.base_model(labels=labels, **kwargs)\n else:\n transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)\n fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())\n if \"past_key_values\" not in fwd_params:\n raise ValueError(\"Model does not support past key values which are required for prefix tuning.\")\n outputs = transformer_backbone_name(**kwargs)\n sequence_output = outputs[0]\n if \"dropout\" in [name for name, _ in list(self.base_model.named_children())]:\n sequence_output = self.base_model.dropout(sequence_output)\n logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)\n\n loss = None\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )" }, { "identifier": "LoraConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "class LoraConfig(PeftConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`LoraModel`].\n\n Args:\n r (`int`): Lora attention dimension.\n target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to.\n lora_alpha (`float`): The alpha parameter for Lora scaling.\n lora_dropout (`float`): The dropout probability for Lora layers.\n fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).\n For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.:\n bias (`str`): Bias type for Lora. 
Can be 'none', 'all' or 'lora_only'\n modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable\n and saved in the final checkpoint.\n \"\"\"\n\n r: int = field(default=8, metadata={\"help\": \"Lora attention dimension\"})\n target_modules: Optional[Union[List[str], str]] = field(\n default=None,\n metadata={\n \"help\": \"List of module names or regex expression of the module names to replace with Lora.\"\n \"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' \"\n },\n )\n lora_alpha: int = field(default=None, metadata={\"help\": \"Lora alpha\"})\n lora_dropout: float = field(default=None, metadata={\"help\": \"Lora dropout\"})\n fan_in_fan_out: bool = field(\n default=False,\n metadata={\"help\": \"Set this to True if the layer to replace stores weight like (fan_in, fan_out)\"},\n )\n bias: str = field(default=\"none\", metadata={\"help\": \"Bias type for Lora. Can be 'none', 'all' or 'lora_only'\"})\n modules_to_save: Optional[List[str]] = field(\n default=None,\n metadata={\n \"help\": \"List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. \"\n \"For example, in Sequence Classification or Token Classification tasks, \"\n \"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.\"\n },\n )\n init_lora_weights: bool = field(\n default=True,\n metadata={\"help\": \"Whether to initialize the weights of the Lora layers.\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.LORA" }, { "identifier": "AdaLoraConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/adalora.py", "snippet": "class AdaLoraConfig(LoraConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`~peft.AdaLora`].\n\n Args:\n target_r (`int`): The target average rank of incremental matrix.\n init_r (`int`): The initial rank for each incremental matrix.\n tinit (`int`): The steps of initial fine-tuning warmup.\n tfinal (`int`): The step of final fine-tuning.\n deltaT (`int`): The time internval between two budget allocations.\n beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.\n beta2 (`float`): The hyperparameter of EMA for undertainty quantification.\n orth_reg_weight (`float`): The coefficient of orthogonal regularization.\n total_step (`int`): The total training steps that should be specified before training.\n rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.\n \"\"\"\n\n target_r: int = field(default=8, metadata={\"help\": \"Target Lora matrix dimension.\"})\n init_r: int = field(default=12, metadata={\"help\": \"Intial Lora matrix dimension.\"})\n tinit: int = field(default=0, metadata={\"help\": \"The steps of initial warmup.\"})\n tfinal: int = field(default=0, metadata={\"help\": \"The steps of final warmup.\"})\n deltaT: int = field(default=1, metadata={\"help\": \"Step interval of rank allocation.\"})\n beta1: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n beta2: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n orth_reg_weight: float = field(default=0.5, metadata={\"help\": \"The orthogonal regularization coefficient.\"})\n total_step: Optional[int] = field(default=None, metadata={\"help\": \"The total training steps.\"})\n rank_pattern: Optional[dict] = field(default=None, metadata={\"help\": \"The saved rank pattern.\"})\n\n def __post_init__(self):\n self.peft_type = PeftType.ADALORA" }, { 
"identifier": "PromptEncoderConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/p_tuning.py", "snippet": "class PromptEncoderConfig(PromptLearningConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`PromptEncoder`].\n\n Args:\n encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]):\n The type of reparameterization to use.\n encoder_hidden_size (`int`): The hidden size of the prompt encoder.\n encoder_num_layers (`int`): The number of layers of the prompt encoder.\n encoder_dropout (`float`): The dropout probability of the prompt encoder.\n \"\"\"\n\n encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field(\n default=PromptEncoderReparameterizationType.MLP,\n metadata={\"help\": \"How to reparameterize the prompt encoder\"},\n )\n encoder_hidden_size: int = field(\n default=None,\n metadata={\"help\": \"The hidden size of the prompt encoder\"},\n )\n encoder_num_layers: int = field(\n default=2,\n metadata={\"help\": \"The number of layers of the prompt encoder\"},\n )\n encoder_dropout: float = field(\n default=0.0,\n metadata={\"help\": \"The dropout of the prompt encoder\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.P_TUNING" }, { "identifier": "PrefixTuningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prefix_tuning.py", "snippet": "class PrefixTuningConfig(PromptLearningConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`PrefixEncoder`].\n\n Args:\n encoder_hidden_size (`int`): The hidden size of the prompt encoder.\n prefix_projection (`bool`): Whether to project the prefix embeddings.\n \"\"\"\n\n encoder_hidden_size: int = field(\n default=None,\n metadata={\"help\": \"The hidden size of the encoder\"},\n )\n prefix_projection: bool = field(\n default=False,\n metadata={\"help\": \"Whether to project the prefix tokens\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.PREFIX_TUNING" }, { "identifier": "PromptTuningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prompt_tuning.py", "snippet": "class PromptTuningConfig(PromptLearningConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`PromptEmbedding`].\n\n Args:\n prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding.\n prompt_tuning_init_text (`str`, *optional*):\n The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`.\n tokenizer_name_or_path (`str`, *optional*):\n The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`.\n \"\"\"\n\n prompt_tuning_init: Union[PromptTuningInit, str] = field(\n default=PromptTuningInit.RANDOM,\n metadata={\"help\": \"How to initialize the prompt tuning parameters\"},\n )\n prompt_tuning_init_text: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`\"\n },\n )\n tokenizer_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The tokenizer to use for prompt tuning initialization. 
Only used if prompt_tuning_init is `TEXT`\"\n },\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.PROMPT_TUNING" }, { "identifier": "PromptLearningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" } ]
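The config snippets quoted above (LoraConfig, AdaLoraConfig, PromptEncoderConfig, PrefixTuningConfig, PromptTuningConfig) each stamp a `peft_type` in `__post_init__`, which is what the `PEFT_TYPE_TO_CONFIG_MAPPING` in the cropped code below keys on. As a hedged illustration only (not part of this dataset record; it assumes the upstream HuggingFace `peft` package, which the vendored copy quoted above mirrors), constructing two of these configs directly looks like:

```py
# Illustrative sketch, not part of the record. Field names follow the quoted
# dataclasses: r/lora_alpha/lora_dropout/target_modules/bias for LoraConfig;
# num_virtual_tokens comes from PromptLearningConfig, which PrefixTuningConfig inherits.
from peft import LoraConfig, PrefixTuningConfig  # assumed upstream import path

lora_cfg = LoraConfig(
    r=8,                        # Lora attention dimension
    lora_alpha=16,
    lora_dropout=0.1,
    target_modules=["q", "v"],  # module names to replace with Lora layers
    bias="none",                # 'none', 'all' or 'lora_only'
)
prefix_cfg = PrefixTuningConfig(
    num_virtual_tokens=20,      # inherited from PromptLearningConfig
    encoder_hidden_size=768,
    prefix_projection=False,
)

# __post_init__ sets the peft_type that PEFT_TYPE_TO_CONFIG_MAPPING dispatches on
print(lora_cfg.peft_type, prefix_cfg.peft_type)  # PeftType.LORA PeftType.PREFIX_TUNING
```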
from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .tuners import AdaLoraConfig, LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig from .utils import PromptLearningConfig
14908
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "ADALORA": AdaLoraConfig, } def get_peft_config(config_dict): """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", token_dim) return peft_config def get_peft_model(model, peft_config): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = model.config.to_dict() if hasattr(model.config, "to_dict") else model.config peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance(
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "ADALORA": AdaLoraConfig, } def get_peft_config(config_dict): """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", token_dim) return peft_config def get_peft_model(model, peft_config): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = model.config.to_dict() if hasattr(model.config, "to_dict") else model.config peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance(
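The `_prepare_prompt_learning_config` helper shown in the cropped/all code above fills in `num_layers`, `token_dim`, and `num_attention_heads` by probing the model config dict for the different key spellings used across architectures. A minimal, self-contained sketch of that fallback logic follows; `demo_model_config` and `first_present` are made up here purely for illustration:

```py
# Demo only: mirrors the if/elif key-fallback chains in _prepare_prompt_learning_config.
demo_model_config = {"n_layer": 12, "n_embd": 768, "n_head": 12}  # GPT-2-style keys


def first_present(cfg, keys):
    # Return the value of the first key that exists, like the helper's elif chain.
    for key in keys:
        if key in cfg:
            return cfg[key]
    raise ValueError(f"None of {keys} found; specify the value in peft_config")


num_layers = first_present(demo_model_config, ["num_hidden_layers", "num_layers", "n_layer"])
token_dim = first_present(demo_model_config, ["hidden_size", "n_embd", "d_model"])
num_heads = first_present(
    demo_model_config,
    ["num_attention_heads", "n_head", "num_heads", "encoder_attention_heads"],
)
assert (num_layers, token_dim, num_heads) == (12, 768, 12)
```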
peft_config, PromptLearningConfig
10
2023-10-30 10:50:32+00:00
24k
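For readability, here is how the truncated `get_peft_model` body above plausibly continues once this record's `next_line` (`peft_config, PromptLearningConfig`) is spliced back in. Everything past that line is an assumed reconstruction based on the two mapping dicts in the cropped code, not text taken from the record, and the wrapper name `get_peft_model_tail` is hypothetical, used only to keep the fragment syntactically valid:

```py
# Assumed reconstruction (sketch): the tail of get_peft_model after the
# truncation point. PeftModel, PromptLearningConfig, MODEL_TYPE_TO_PEFT_MODEL_MAPPING
# and _prepare_prompt_learning_config are the names defined in the code above.
def get_peft_model_tail(model, peft_config, model_config):
    # Unknown task types that are not prompt-learning configs fall back to the base wrapper.
    if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance(
        peft_config, PromptLearningConfig
    ):
        return PeftModel(model, peft_config)
    # Prompt-learning configs get their missing dimensions inferred from the model config.
    if isinstance(peft_config, PromptLearningConfig):
        peft_config = _prepare_prompt_learning_config(peft_config, model_config)
    # Otherwise dispatch to the task-specific Peft model class.
    return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config)
```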
chenran-li/RQL-release
sb3_contrib/ars/ars.py
[ { "identifier": "BaseAlgorithm", "path": "stable_baselines3/common/base_class.py", "snippet": "class BaseAlgorithm(ABC):\n \"\"\"\n The base of RL algorithms\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from\n (if registered in Gym, can be str. Can be None for loading trained models)\n :param learning_rate: learning rate for the optimizer,\n it can be a function of the current progress remaining (from 1 to 0)\n :param policy_kwargs: Additional arguments to be passed to the policy on creation\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param device: Device on which the code should run.\n By default, it will try to use a Cuda compatible device and fallback to cpu\n if it is not possible.\n :param support_multi_env: Whether the algorithm supports training\n with multiple environments (as in A2C)\n :param monitor_wrapper: When creating an environment, whether to wrap it\n or not in a Monitor wrapper.\n :param seed: Seed for the pseudo random generators\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param supported_action_spaces: The action spaces supported by the algorithm.\n \"\"\"\n\n # Policy aliases (see _get_policy_from_name())\n policy_aliases: Dict[str, Type[BasePolicy]] = {}\n\n def __init__(\n self,\n policy: Union[str, Type[BasePolicy]],\n env: Union[GymEnv, str, None],\n learning_rate: Union[float, Schedule],\n policy_kwargs: Optional[Dict[str, Any]] = None,\n tensorboard_log: Optional[str] = None,\n verbose: int = 0,\n device: Union[th.device, str] = \"auto\",\n support_multi_env: bool = False,\n monitor_wrapper: bool = True,\n seed: Optional[int] = None,\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n supported_action_spaces: Optional[Tuple[spaces.Space, ...]] = None,\n ):\n if isinstance(policy, str):\n self.policy_class = self._get_policy_from_name(policy)\n else:\n self.policy_class = policy\n\n self.device = get_device(device)\n if verbose >= 1:\n print(f\"Using {self.device} device\")\n\n self.env = None # type: Optional[GymEnv]\n # get VecNormalize object if needed\n self._vec_normalize_env = unwrap_vec_normalize(env)\n self.verbose = verbose\n self.policy_kwargs = {} if policy_kwargs is None else policy_kwargs\n self.observation_space = None # type: Optional[spaces.Space]\n self.action_space = None # type: Optional[spaces.Space]\n self.n_envs = None\n self.num_timesteps = 0\n # Used for updating schedules\n self._total_timesteps = 0\n # Used for computing fps, it is updated at each call of learn()\n self._num_timesteps_at_start = 0\n self.seed = seed\n self.action_noise: Optional[ActionNoise] = None\n self.start_time = None\n self.policy = None\n self.learning_rate = learning_rate\n self.tensorboard_log = tensorboard_log\n self.lr_schedule = None # type: Optional[Schedule]\n self._last_obs = None # type: Optional[Union[np.ndarray, Dict[str, np.ndarray]]]\n self._last_episode_starts = None # type: Optional[np.ndarray]\n # When using VecNormalize:\n self._last_original_obs = None # type: Optional[Union[np.ndarray, Dict[str, np.ndarray]]]\n self._episode_num = 0\n # Used for gSDE only\n self.use_sde = 
use_sde\n self.sde_sample_freq = sde_sample_freq\n # Track the training progress remaining (from 1 to 0)\n # this is used to update the learning rate\n self._current_progress_remaining = 1\n # Buffers for logging\n self.ep_info_buffer = None # type: Optional[deque]\n self.ep_success_buffer = None # type: Optional[deque]\n # For logging (and TD3 delayed updates)\n self._n_updates = 0 # type: int\n # The logger object\n self._logger = None # type: Logger\n # Whether the user passed a custom logger or not\n self._custom_logger = False\n\n # Create and wrap the env if needed\n if env is not None:\n env = maybe_make_env(env, self.verbose)\n env = self._wrap_env(env, self.verbose, monitor_wrapper)\n\n self.observation_space = env.observation_space\n self.action_space = env.action_space\n self.n_envs = env.num_envs\n self.env = env\n\n if supported_action_spaces is not None:\n assert isinstance(self.action_space, supported_action_spaces), (\n f\"The algorithm only supports {supported_action_spaces} as action spaces \"\n f\"but {self.action_space} was provided\"\n )\n\n if not support_multi_env and self.n_envs > 1:\n raise ValueError(\n \"Error: the model does not support multiple envs; it requires \" \"a single vectorized environment.\"\n )\n\n # Catch common mistake: using MlpPolicy/CnnPolicy instead of MultiInputPolicy\n if policy in [\"MlpPolicy\", \"CnnPolicy\"] and isinstance(self.observation_space, spaces.Dict):\n raise ValueError(f\"You must use `MultiInputPolicy` when working with dict observation space, not {policy}\")\n\n if self.use_sde and not isinstance(self.action_space, spaces.Box):\n raise ValueError(\"generalized State-Dependent Exploration (gSDE) can only be used with continuous actions.\")\n\n if isinstance(self.action_space, spaces.Box):\n assert np.all(\n np.isfinite(np.array([self.action_space.low, self.action_space.high]))\n ), \"Continuous action space must have a finite lower and upper bound\"\n\n @staticmethod\n def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> VecEnv:\n \"\"\" \"\n Wrap environment with the appropriate wrappers if needed.\n For instance, to have a vectorized environment\n or to re-order the image channels.\n\n :param env:\n :param verbose: Verbosity level: 0 for no output, 1 for indicating wrappers used\n :param monitor_wrapper: Whether to wrap the env in a ``Monitor`` when possible.\n :return: The wrapped environment.\n \"\"\"\n if not isinstance(env, VecEnv):\n if not is_wrapped(env, Monitor) and monitor_wrapper:\n if verbose >= 1:\n print(\"Wrapping the env with a `Monitor` wrapper\")\n env = Monitor(env)\n if verbose >= 1:\n print(\"Wrapping the env in a DummyVecEnv.\")\n env = DummyVecEnv([lambda: env])\n\n # Make sure that dict-spaces are not nested (not supported)\n check_for_nested_spaces(env.observation_space)\n\n if not is_vecenv_wrapped(env, VecTransposeImage):\n wrap_with_vectranspose = False\n if isinstance(env.observation_space, spaces.Dict):\n # If even one of the keys is a image-space in need of transpose, apply transpose\n # If the image spaces are not consistent (for instance one is channel first,\n # the other channel last), VecTransposeImage will throw an error\n for space in env.observation_space.spaces.values():\n wrap_with_vectranspose = wrap_with_vectranspose or (\n is_image_space(space) and not is_image_space_channels_first(space)\n )\n else:\n wrap_with_vectranspose = is_image_space(env.observation_space) and not is_image_space_channels_first(\n env.observation_space\n )\n\n if 
wrap_with_vectranspose:\n if verbose >= 1:\n print(\"Wrapping the env in a VecTransposeImage.\")\n env = VecTransposeImage(env)\n\n return env\n\n @abstractmethod\n def _setup_model(self) -> None:\n \"\"\"Create networks, buffer and optimizers.\"\"\"\n\n def set_logger(self, logger: Logger) -> None:\n \"\"\"\n Setter for for logger object.\n\n .. warning::\n\n When passing a custom logger object,\n this will overwrite ``tensorboard_log`` and ``verbose`` settings\n passed to the constructor.\n \"\"\"\n self._logger = logger\n # User defined logger\n self._custom_logger = True\n\n @property\n def logger(self) -> Logger:\n \"\"\"Getter for the logger object.\"\"\"\n return self._logger\n\n def _setup_lr_schedule(self) -> None:\n \"\"\"Transform to callable if needed.\"\"\"\n self.lr_schedule = get_schedule_fn(self.learning_rate)\n\n def _update_current_progress_remaining(self, num_timesteps: int, total_timesteps: int) -> None:\n \"\"\"\n Compute current progress remaining (starts from 1 and ends to 0)\n\n :param num_timesteps: current number of timesteps\n :param total_timesteps:\n \"\"\"\n self._current_progress_remaining = 1.0 - float(num_timesteps) / float(total_timesteps)\n\n def _update_learning_rate(self, optimizers: Union[List[th.optim.Optimizer], th.optim.Optimizer]) -> None:\n \"\"\"\n Update the optimizers learning rate using the current learning rate schedule\n and the current progress remaining (from 1 to 0).\n\n :param optimizers:\n An optimizer or a list of optimizers.\n \"\"\"\n # Log the current learning rate\n self.logger.record(\"train/learning_rate\", self.lr_schedule(self._current_progress_remaining))\n\n if not isinstance(optimizers, list):\n optimizers = [optimizers]\n for optimizer in optimizers:\n update_learning_rate(optimizer, self.lr_schedule(self._current_progress_remaining))\n\n def _excluded_save_params(self) -> List[str]:\n \"\"\"\n Returns the names of the parameters that should be excluded from being\n saved by pickling. E.g. replay buffers are skipped by default\n as they take up a lot of space. PyTorch variables should be excluded\n with this so they can be stored with ``th.save``.\n\n :return: List of parameters that should be excluded from being saved with pickle.\n \"\"\"\n return [\n \"policy\",\n \"device\",\n \"env\",\n \"replay_buffer\",\n \"rollout_buffer\",\n \"_vec_normalize_env\",\n \"_episode_storage\",\n \"_logger\",\n \"_custom_logger\",\n ]\n\n def _get_policy_from_name(self, policy_name: str) -> Type[BasePolicy]:\n \"\"\"\n Get a policy class from its name representation.\n\n The goal here is to standardize policy naming, e.g.\n all algorithms can call upon \"MlpPolicy\" or \"CnnPolicy\",\n and they receive respective policies that work for them.\n\n :param policy_name: Alias of the policy\n :return: A policy class (type)\n \"\"\"\n\n if policy_name in self.policy_aliases:\n return self.policy_aliases[policy_name]\n else:\n raise ValueError(f\"Policy {policy_name} unknown\")\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n \"\"\"\n Get the name of the torch variables that will be saved with\n PyTorch ``th.save``, ``th.load`` and ``state_dicts`` instead of the default\n pickling strategy. This is to handle device placement correctly.\n\n Names can point to specific variables under classes, e.g.\n \"policy.optimizer\" would point to ``optimizer`` object of ``self.policy``\n if this object.\n\n :return:\n List of Torch variables whose state dicts to save (e.g. 
th.nn.Modules),\n and list of other Torch variables to store with ``th.save``.\n \"\"\"\n state_dicts = [\"policy\"]\n\n return state_dicts, []\n\n def _init_callback(\n self,\n callback: MaybeCallback,\n progress_bar: bool = False,\n ) -> BaseCallback:\n \"\"\"\n :param callback: Callback(s) called at every step with state of the algorithm.\n :param progress_bar: Display a progress bar using tqdm and rich.\n :return: A hybrid callback calling `callback` and performing evaluation.\n \"\"\"\n # Convert a list of callbacks into a callback\n if isinstance(callback, list):\n callback = CallbackList(callback)\n\n # Convert functional callback to object\n if not isinstance(callback, BaseCallback):\n callback = ConvertCallback(callback)\n\n # Add progress bar callback\n if progress_bar:\n callback = CallbackList([callback, ProgressBarCallback()])\n\n callback.init_callback(self)\n return callback\n\n def _setup_learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n reset_num_timesteps: bool = True,\n tb_log_name: str = \"run\",\n progress_bar: bool = False,\n ) -> Tuple[int, BaseCallback]:\n \"\"\"\n Initialize different variables needed for training.\n\n :param total_timesteps: The total number of samples (env steps) to train on\n :param callback: Callback(s) called at every step with state of the algorithm.\n :param reset_num_timesteps: Whether to reset or not the ``num_timesteps`` attribute\n :param tb_log_name: the name of the run for tensorboard log\n :param progress_bar: Display a progress bar using tqdm and rich.\n :return: Total timesteps and callback(s)\n \"\"\"\n self.start_time = time.time_ns()\n\n if self.ep_info_buffer is None or reset_num_timesteps:\n # Initialize buffers if they don't exist, or reinitialize if resetting counters\n self.ep_info_buffer = deque(maxlen=100)\n self.ep_success_buffer = deque(maxlen=100)\n\n if self.action_noise is not None:\n self.action_noise.reset()\n\n if reset_num_timesteps:\n self.num_timesteps = 0\n self._episode_num = 0\n else:\n # Make sure training timesteps are ahead of the internal counter\n total_timesteps += self.num_timesteps\n self._total_timesteps = total_timesteps\n self._num_timesteps_at_start = self.num_timesteps\n\n # Avoid resetting the environment when calling ``.learn()`` consecutive times\n if reset_num_timesteps or self._last_obs is None:\n self._last_obs = self.env.reset() # pytype: disable=annotation-type-mismatch\n self._last_episode_starts = np.ones((self.env.num_envs,), dtype=bool)\n # Retrieve unnormalized observation for saving into the buffer\n if self._vec_normalize_env is not None:\n self._last_original_obs = self._vec_normalize_env.get_original_obs()\n\n # Configure logger's outputs if no logger was passed\n if not self._custom_logger:\n self._logger = utils.configure_logger(self.verbose, self.tensorboard_log, tb_log_name, reset_num_timesteps)\n\n # Create eval callback if needed\n callback = self._init_callback(callback, progress_bar)\n\n return total_timesteps, callback\n\n def _update_info_buffer(self, infos: List[Dict[str, Any]], dones: Optional[np.ndarray] = None) -> None:\n \"\"\"\n Retrieve reward, episode length, episode success and update the buffer\n if using Monitor wrapper or a GoalEnv.\n\n :param infos: List of additional information about the transition.\n :param dones: Termination signals\n \"\"\"\n if dones is None:\n dones = np.array([False] * len(infos))\n for idx, info in enumerate(infos):\n maybe_ep_info = info.get(\"episode\")\n maybe_is_success = info.get(\"is_success\")\n if 
maybe_ep_info is not None:\n self.ep_info_buffer.extend([maybe_ep_info])\n if maybe_is_success is not None and dones[idx]:\n self.ep_success_buffer.append(maybe_is_success)\n\n def get_env(self) -> Optional[VecEnv]:\n \"\"\"\n Returns the current environment (can be None if not defined).\n\n :return: The current environment\n \"\"\"\n return self.env\n\n def get_vec_normalize_env(self) -> Optional[VecNormalize]:\n \"\"\"\n Return the ``VecNormalize`` wrapper of the training env\n if it exists.\n\n :return: The ``VecNormalize`` env.\n \"\"\"\n return self._vec_normalize_env\n\n def set_env(self, env: GymEnv, force_reset: bool = True) -> None:\n \"\"\"\n Checks the validity of the environment, and if it is coherent, set it as the current environment.\n Furthermore wrap any non vectorized env into a vectorized\n checked parameters:\n - observation_space\n - action_space\n\n :param env: The environment for learning a policy\n :param force_reset: Force call to ``reset()`` before training\n to avoid unexpected behavior.\n See issue https://github.com/DLR-RM/stable-baselines3/issues/597\n \"\"\"\n # if it is not a VecEnv, make it a VecEnv\n # and do other transformations (dict obs, image transpose) if needed\n env = self._wrap_env(env, self.verbose)\n assert env.num_envs == self.n_envs, (\n \"The number of environments to be set is different from the number of environments in the model: \"\n f\"({env.num_envs} != {self.n_envs}), whereas `set_env` requires them to be the same. To load a model with \"\n f\"a different number of environments, you must use `{self.__class__.__name__}.load(path, env)` instead\"\n )\n # Check that the observation spaces match\n check_for_correct_spaces(env, self.observation_space, self.action_space)\n # Update VecNormalize object\n # otherwise the wrong env may be used, see https://github.com/DLR-RM/stable-baselines3/issues/637\n self._vec_normalize_env = unwrap_vec_normalize(env)\n\n # Discard `_last_obs`, this will force the env to reset before training\n # See issue https://github.com/DLR-RM/stable-baselines3/issues/597\n if force_reset:\n self._last_obs = None\n\n self.n_envs = env.num_envs\n self.env = env\n\n @abstractmethod\n def learn(\n self: SelfBaseAlgorithm,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 100,\n tb_log_name: str = \"run\",\n reset_num_timesteps: bool = True,\n progress_bar: bool = False,\n ) -> SelfBaseAlgorithm:\n \"\"\"\n Return a trained model.\n\n :param total_timesteps: The total number of samples (env steps) to train on\n :param callback: callback(s) called at every step with state of the algorithm.\n :param log_interval: The number of timesteps before logging.\n :param tb_log_name: the name of the run for TensorBoard logging\n :param reset_num_timesteps: whether or not to reset the current timestep number (used in logging)\n :param progress_bar: Display a progress bar using tqdm and rich.\n :return: the trained model\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. 
normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n return self.policy.predict(observation, state, episode_start, deterministic)\n\n def set_random_seed(self, seed: Optional[int] = None) -> None:\n \"\"\"\n Set the seed of the pseudo-random generators\n (python, numpy, pytorch, gym, action_space)\n\n :param seed:\n \"\"\"\n if seed is None:\n return\n set_random_seed(seed, using_cuda=self.device.type == th.device(\"cuda\").type)\n self.action_space.seed(seed)\n # self.env is always a VecEnv\n if self.env is not None:\n self.env.seed(seed)\n\n def set_parameters(\n self,\n load_path_or_dict: Union[str, Dict[str, Dict]],\n exact_match: bool = True,\n device: Union[th.device, str] = \"auto\",\n ) -> None:\n \"\"\"\n Load parameters from a given zip-file or a nested dictionary containing parameters for\n different modules (see ``get_parameters``).\n\n :param load_path_or_iter: Location of the saved data (path or file-like, see ``save``), or a nested\n dictionary containing nn.Module parameters used by the policy. The dictionary maps\n object names to a state-dictionary returned by ``torch.nn.Module.state_dict()``.\n :param exact_match: If True, the given parameters should include parameters for each\n module and each of their parameters, otherwise raises an Exception. If set to False, this\n can be used to update only specific parameters.\n :param device: Device on which the code should run.\n \"\"\"\n params = None\n if isinstance(load_path_or_dict, dict):\n params = load_path_or_dict\n else:\n _, params, _ = load_from_zip_file(load_path_or_dict, device=device)\n\n # Keep track which objects were updated.\n # `_get_torch_save_params` returns [params, other_pytorch_variables].\n # We are only interested in former here.\n objects_needing_update = set(self._get_torch_save_params()[0])\n updated_objects = set()\n\n for name in params:\n attr = None\n try:\n attr = recursive_getattr(self, name)\n except Exception as e:\n # What errors recursive_getattr could throw? KeyError, but\n # possible something else too (e.g. if key is an int?).\n # Catch anything for now.\n raise ValueError(f\"Key {name} is an invalid object name.\") from e\n\n if isinstance(attr, th.optim.Optimizer):\n # Optimizers do not support \"strict\" keyword...\n # Seems like they will just replace the whole\n # optimizer state with the given one.\n # On top of this, optimizer state-dict\n # seems to change (e.g. 
first ``optim.step()``),\n # which makes comparing state dictionary keys\n # invalid (there is also a nesting of dictionaries\n # with lists with dictionaries with ...), adding to the\n # mess.\n #\n # TL;DR: We might not be able to reliably say\n # if given state-dict is missing keys.\n #\n # Solution: Just load the state-dict as is, and trust\n # the user has provided a sensible state dictionary.\n attr.load_state_dict(params[name])\n else:\n # Assume attr is th.nn.Module\n attr.load_state_dict(params[name], strict=exact_match)\n updated_objects.add(name)\n\n if exact_match and updated_objects != objects_needing_update:\n raise ValueError(\n \"Names of parameters do not match agents' parameters: \"\n f\"expected {objects_needing_update}, got {updated_objects}\"\n )\n\n @classmethod # noqa: C901\n def load(\n cls: Type[SelfBaseAlgorithm],\n path: Union[str, pathlib.Path, io.BufferedIOBase],\n env: Optional[GymEnv] = None,\n device: Union[th.device, str] = \"auto\",\n custom_objects: Optional[Dict[str, Any]] = None,\n print_system_info: bool = False,\n force_reset: bool = True,\n **kwargs,\n ) -> SelfBaseAlgorithm:\n \"\"\"\n Load the model from a zip-file.\n Warning: ``load`` re-creates the model from scratch, it does not update it in-place!\n For an in-place load use ``set_parameters`` instead.\n\n :param path: path to the file (or a file-like) where to\n load the agent from\n :param env: the new environment to run the loaded model on\n (can be None if you only need prediction from a trained model) has priority over any saved environment\n :param device: Device on which the code should run.\n :param custom_objects: Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. Similar to custom_objects in\n ``keras.models.load_model``. 
Useful when you have an object in\n file that can not be deserialized.\n :param print_system_info: Whether to print system info from the saved model\n and the current system info (useful to debug loading issues)\n :param force_reset: Force call to ``reset()`` before training\n to avoid unexpected behavior.\n See https://github.com/DLR-RM/stable-baselines3/issues/597\n :param kwargs: extra arguments to change the model when loading\n :return: new model instance with loaded parameters\n \"\"\"\n if print_system_info:\n print(\"== CURRENT SYSTEM INFO ==\")\n get_system_info()\n\n data, params, pytorch_variables = load_from_zip_file(\n path,\n device=device,\n custom_objects=custom_objects,\n print_system_info=print_system_info,\n )\n\n # Remove stored device information and replace with ours\n if \"policy_kwargs\" in data:\n if \"device\" in data[\"policy_kwargs\"]:\n del data[\"policy_kwargs\"][\"device\"]\n\n if \"policy_kwargs\" in kwargs and kwargs[\"policy_kwargs\"] != data[\"policy_kwargs\"]:\n raise ValueError(\n f\"The specified policy kwargs do not equal the stored policy kwargs.\"\n f\"Stored kwargs: {data['policy_kwargs']}, specified kwargs: {kwargs['policy_kwargs']}\"\n )\n\n if \"observation_space\" not in data or \"action_space\" not in data:\n raise KeyError(\"The observation_space and action_space were not given, can't verify new environments\")\n\n if env is not None:\n # Wrap first if needed\n env = cls._wrap_env(env, data[\"verbose\"])\n # Check if given env is valid\n check_for_correct_spaces(env, data[\"observation_space\"], data[\"action_space\"])\n # Discard `_last_obs`, this will force the env to reset before training\n # See issue https://github.com/DLR-RM/stable-baselines3/issues/597\n if force_reset and data is not None:\n data[\"_last_obs\"] = None\n # `n_envs` must be updated. See issue https://github.com/DLR-RM/stable-baselines3/issues/1018\n if data is not None:\n data[\"n_envs\"] = env.num_envs\n else:\n # Use stored env, if one exists. 
If not, continue as is (can be used for predict)\n if \"env\" in data:\n env = data[\"env\"]\n\n # noinspection PyArgumentList\n from stable_baselines3.dqn_soft_residual.policies import ResidualSoftCnnPolicy, ResidualSoftMlpPolicy, ResidualSoftMultiInputPolicy\n from stable_baselines3.sac_residual.policies import ResidualCnnPolicy, ResidualMlpPolicy, ResidualMultiInputPolicy\n ResidualPolicies = [ResidualSoftCnnPolicy, ResidualSoftMlpPolicy, ResidualSoftMultiInputPolicy, ResidualCnnPolicy, ResidualMlpPolicy, ResidualMultiInputPolicy]\n if (data[\"policy_class\"] in ResidualPolicies):\n model = cls( # pytype: disable=not-instantiable,wrong-keyword-args\n policy=data[\"policy_class\"],\n prior_model_path = data[\"prior_model_path\"],\n env=env,\n device=device,\n _init_setup_model=False, # pytype: disable=not-instantiable,wrong-keyword-args\n )\n else:\n model = cls( # pytype: disable=not-instantiable,wrong-keyword-args\n policy=data[\"policy_class\"],\n env=env,\n device=device,\n _init_setup_model=False, # pytype: disable=not-instantiable,wrong-keyword-args\n )\n\n # load parameters\n model.__dict__.update(data)\n model.__dict__.update(kwargs)\n model._setup_model()\n\n try:\n # put state_dicts back in place\n model.set_parameters(params, exact_match=True, device=device)\n except RuntimeError as e:\n # Patch to load Policy saved using SB3 < 1.7.0\n # the error is probably due to old policy being loaded\n # See https://github.com/DLR-RM/stable-baselines3/issues/1233\n if \"pi_features_extractor\" in str(e) and \"Missing key(s) in state_dict\" in str(e):\n model.set_parameters(params, exact_match=False, device=device)\n warnings.warn(\n \"You are probably loading a model saved with SB3 < 1.7.0, \"\n \"we deactivated exact_match so you can save the model \"\n \"again to avoid issues in the future \"\n \"(see https://github.com/DLR-RM/stable-baselines3/issues/1233 for more info). \"\n f\"Original error: {e} \\n\"\n \"Note: the model should still work fine, this only a warning.\"\n )\n else:\n raise e\n\n # put other pytorch variables back in place\n if pytorch_variables is not None:\n for name in pytorch_variables:\n # Skip if PyTorch variable was not defined (to ensure backward compatibility).\n # This happens when using SAC/TQC.\n # SAC has an entropy coefficient which can be fixed or optimized.\n # If it is optimized, an additional PyTorch variable `log_ent_coef` is defined,\n # otherwise it is initialized to `None`.\n if pytorch_variables[name] is None:\n continue\n # Set the data attribute directly to avoid issue when using optimizers\n # See https://github.com/DLR-RM/stable-baselines3/issues/391\n recursive_setattr(model, name + \".data\", pytorch_variables[name].data)\n\n # Sample gSDE exploration matrix, so it uses the right device\n # see issue #44\n if model.use_sde:\n model.policy.reset_noise() # pytype: disable=attribute-error\n return model\n\n def get_parameters(self) -> Dict[str, Dict]:\n \"\"\"\n Return the parameters of the agent. 
This includes parameters from different networks, e.g.\n critics (value functions) and policies (pi functions).\n\n :return: Mapping of from names of the objects to PyTorch state-dicts.\n \"\"\"\n state_dicts_names, _ = self._get_torch_save_params()\n params = {}\n for name in state_dicts_names:\n attr = recursive_getattr(self, name)\n # Retrieve state dict\n params[name] = attr.state_dict()\n return params\n\n def save(\n self,\n path: Union[str, pathlib.Path, io.BufferedIOBase],\n exclude: Optional[Iterable[str]] = None,\n include: Optional[Iterable[str]] = None,\n ) -> None:\n \"\"\"\n Save all the attributes of the object and the model parameters in a zip-file.\n\n :param path: path to the file where the rl agent should be saved\n :param exclude: name of parameters that should be excluded in addition to the default ones\n :param include: name of parameters that might be excluded but should be included anyway\n \"\"\"\n # Copy parameter list so we don't mutate the original dict\n data = self.__dict__.copy()\n\n # Exclude is union of specified parameters (if any) and standard exclusions\n if exclude is None:\n exclude = []\n exclude = set(exclude).union(self._excluded_save_params())\n\n # Do not exclude params if they are specifically included\n if include is not None:\n exclude = exclude.difference(include)\n\n state_dicts_names, torch_variable_names = self._get_torch_save_params()\n all_pytorch_variables = state_dicts_names + torch_variable_names\n for torch_var in all_pytorch_variables:\n # We need to get only the name of the top most module as we'll remove that\n var_name = torch_var.split(\".\")[0]\n # Any params that are in the save vars must not be saved by data\n exclude.add(var_name)\n\n # Remove parameter entries of parameters which are to be excluded\n for param_name in exclude:\n data.pop(param_name, None)\n\n # Build dict of torch variables\n pytorch_variables = None\n if torch_variable_names is not None:\n pytorch_variables = {}\n for name in torch_variable_names:\n attr = recursive_getattr(self, name)\n pytorch_variables[name] = attr\n\n # Build dict of state_dicts\n params_to_save = self.get_parameters()\n\n save_to_zip_file(path, data=data, params=params_to_save, pytorch_variables=pytorch_variables)" }, { "identifier": "BaseCallback", "path": "stable_baselines3/common/callbacks.py", "snippet": "class BaseCallback(ABC):\n \"\"\"\n Base class for callback.\n\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n \"\"\"\n\n def __init__(self, verbose: int = 0):\n super().__init__()\n # The RL model\n self.model = None # type: Optional[base_class.BaseAlgorithm]\n # An alias for self.model.get_env(), the environment used for training\n self.training_env = None # type: Union[gym.Env, VecEnv, None]\n # Number of time the callback was called\n self.n_calls = 0 # type: int\n # n_envs * n times env.step() was called\n self.num_timesteps = 0 # type: int\n self.verbose = verbose\n self.locals: Dict[str, Any] = {}\n self.globals: Dict[str, Any] = {}\n self.logger = None\n # Sometimes, for event callback, it is useful\n # to have access to the parent object\n self.parent = None # type: Optional[BaseCallback]\n\n # Type hint as string to avoid circular import\n def init_callback(self, model: \"base_class.BaseAlgorithm\") -> None:\n \"\"\"\n Initialize the callback by saving references to the\n RL model and the training environment for convenience.\n \"\"\"\n self.model = model\n self.training_env = model.get_env()\n self.logger = model.logger\n 
self._init_callback()\n\n def _init_callback(self) -> None:\n pass\n\n def on_training_start(self, locals_: Dict[str, Any], globals_: Dict[str, Any]) -> None:\n # Those are reference and will be updated automatically\n self.locals = locals_\n self.globals = globals_\n # Update num_timesteps in case training was done before\n self.num_timesteps = self.model.num_timesteps\n self._on_training_start()\n\n def _on_training_start(self) -> None:\n pass\n\n def on_rollout_start(self) -> None:\n self._on_rollout_start()\n\n def _on_rollout_start(self) -> None:\n pass\n\n @abstractmethod\n def _on_step(self) -> bool:\n \"\"\"\n :return: If the callback returns False, training is aborted early.\n \"\"\"\n return True\n\n def on_step(self) -> bool:\n \"\"\"\n This method will be called by the model after each call to ``env.step()``.\n\n For child callback (of an ``EventCallback``), this will be called\n when the event is triggered.\n\n :return: If the callback returns False, training is aborted early.\n \"\"\"\n self.n_calls += 1\n self.num_timesteps = self.model.num_timesteps\n\n return self._on_step()\n\n def on_training_end(self) -> None:\n self._on_training_end()\n\n def _on_training_end(self) -> None:\n pass\n\n def on_rollout_end(self) -> None:\n self._on_rollout_end()\n\n def _on_rollout_end(self) -> None:\n pass\n\n def update_locals(self, locals_: Dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n self.locals.update(locals_)\n self.update_child_locals(locals_)\n\n def update_child_locals(self, locals_: Dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables on sub callbacks.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n pass" }, { "identifier": "evaluate_policy", "path": "stable_baselines3/common/evaluation.py", "snippet": "def evaluate_policy(\n model: \"type_aliases.PolicyPredictor\",\n env: Union[gym.Env, VecEnv],\n n_eval_episodes: int = 10,\n deterministic: bool = True,\n render: bool = False,\n callback: Optional[Callable[[Dict[str, Any], Dict[str, Any]], None]] = None,\n reward_threshold: Optional[float] = None,\n return_episode_rewards: bool = False,\n warn: bool = True,\n) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:\n \"\"\"\n Runs policy for ``n_eval_episodes`` episodes and returns average reward.\n If a vector env is passed in, this divides the episodes to evaluate onto the\n different elements of the vector env. This static division of work is done to\n remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more\n details and discussion.\n\n .. note::\n If environment has not been wrapped with ``Monitor`` wrapper, reward and\n episode lengths are counted as it appears with ``env.step`` calls. If\n the environment contains wrappers that modify rewards or episode lengths\n (e.g. reward scaling, early episode reset), these will affect the evaluation\n results as well. You can avoid this by wrapping environment with ``Monitor``\n wrapper before anything else.\n\n :param model: The RL agent you want to evaluate. 
This can be any object\n that implements a `predict` method, such as an RL algorithm (``BaseAlgorithm``)\n or policy (``BasePolicy``).\n :param env: The gym environment or ``VecEnv`` environment.\n :param n_eval_episodes: Number of episode to evaluate the agent\n :param deterministic: Whether to use deterministic or stochastic actions\n :param render: Whether to render the environment or not\n :param callback: callback function to do additional checks,\n called after each step. Gets locals() and globals() passed as parameters.\n :param reward_threshold: Minimum expected reward per episode,\n this will raise an error if the performance is not met\n :param return_episode_rewards: If True, a list of rewards and episode lengths\n per episode will be returned instead of the mean.\n :param warn: If True (default), warns user about lack of a Monitor wrapper in the\n evaluation environment.\n :return: Mean reward per episode, std of reward per episode.\n Returns ([float], [int]) when ``return_episode_rewards`` is True, first\n list containing per-episode rewards and second containing per-episode lengths\n (in number of steps).\n \"\"\"\n is_monitor_wrapped = False\n # Avoid circular import\n from stable_baselines3.common.monitor import Monitor\n\n if not isinstance(env, VecEnv):\n env = DummyVecEnv([lambda: env])\n\n is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]\n\n if not is_monitor_wrapped and warn:\n warnings.warn(\n \"Evaluation environment is not wrapped with a ``Monitor`` wrapper. \"\n \"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. \"\n \"Consider wrapping environment first with ``Monitor`` wrapper.\",\n UserWarning,\n )\n\n n_envs = env.num_envs\n episode_rewards = []\n episode_lengths = []\n\n episode_counts = np.zeros(n_envs, dtype=\"int\")\n # Divides episodes among different sub environments in the vector as evenly as possible\n episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype=\"int\")\n\n current_rewards = np.zeros(n_envs)\n current_lengths = np.zeros(n_envs, dtype=\"int\")\n observations = env.reset()\n states = None\n episode_starts = np.ones((env.num_envs,), dtype=bool)\n while (episode_counts < episode_count_targets).any():\n actions, states = model.predict(observations, state=states, episode_start=episode_starts, deterministic=deterministic)\n observations, rewards, dones, infos = env.step(actions)\n current_rewards += rewards\n current_lengths += 1\n for i in range(n_envs):\n if episode_counts[i] < episode_count_targets[i]:\n\n # unpack values so that the callback can access the local variables\n reward = rewards[i]\n done = dones[i]\n info = infos[i]\n episode_starts[i] = done\n\n if callback is not None:\n callback(locals(), globals())\n\n if dones[i]:\n if is_monitor_wrapped:\n # Atari wrapper can send a \"done\" signal when\n # the agent loses a life, but it does not correspond\n # to the true end of episode\n if \"episode\" in info.keys():\n # Do not trust \"done\" with episode endings.\n # Monitor wrapper includes \"episode\" key in info if environment\n # has been wrapped with it. 
Use those rewards instead.\n episode_rewards.append(info[\"episode\"][\"r\"])\n episode_lengths.append(info[\"episode\"][\"l\"])\n # Only increment at the real end of an episode\n episode_counts[i] += 1\n else:\n episode_rewards.append(current_rewards[i])\n episode_lengths.append(current_lengths[i])\n episode_counts[i] += 1\n current_rewards[i] = 0\n current_lengths[i] = 0\n\n if render:\n env.render()\n\n mean_reward = np.mean(episode_rewards)\n std_reward = np.std(episode_rewards)\n if reward_threshold is not None:\n assert mean_reward > reward_threshold, \"Mean reward below threshold: \" f\"{mean_reward:.2f} < {reward_threshold:.2f}\"\n if return_episode_rewards:\n return episode_rewards, episode_lengths\n return mean_reward, std_reward" }, { "identifier": "BasePolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class BasePolicy(BaseModel, ABC):\n \"\"\"The base policy object.\n\n Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = False, **kwargs):\n super().__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. 
normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy, and reshape to the original action shape\n actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)\n\n if isinstance(self.action_space, spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions.squeeze(axis=0)\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))" }, { "identifier": "load_from_zip_file", "path": "stable_baselines3/common/save_util.py", "snippet": "def load_from_zip_file(\n load_path: Union[str, pathlib.Path, io.BufferedIOBase],\n load_data: bool = True,\n custom_objects: Optional[Dict[str, Any]] = None,\n device: Union[th.device, str] = \"auto\",\n verbose: int = 0,\n print_system_info: bool = False,\n) -> (Tuple[Optional[Dict[str, Any]], Optional[TensorDict], Optional[TensorDict]]):\n \"\"\"\n Load model data from a .zip archive\n\n :param load_path: Where to load the model from\n :param load_data: Whether we should load and return data\n (class parameters). Mainly used by 'load_parameters' to only load model parameters (weights)\n :param custom_objects: Dictionary of objects to replace\n upon loading. If a variable is present in this dictionary as a\n key, it will not be deserialized and the corresponding item\n will be used instead. Similar to custom_objects in\n ``keras.models.load_model``. 
Useful when you have an object in\n file that can not be deserialized.\n :param device: Device on which the code should run.\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n :param print_system_info: Whether to print or not the system info\n about the saved model.\n :return: Class parameters, model state_dicts (aka \"params\", dict of state_dict)\n and dict of pytorch variables\n \"\"\"\n load_path = open_path(load_path, \"r\", verbose=verbose, suffix=\"zip\")\n\n # set device to cpu if cuda is not available\n device = get_device(device=device)\n\n # Open the zip archive and load data\n try:\n with zipfile.ZipFile(load_path) as archive:\n namelist = archive.namelist()\n # If data or parameters is not in the\n # zip archive, assume they were stored\n # as None (_save_to_file_zip allows this).\n data = None\n pytorch_variables = None\n params = {}\n\n # Debug system info first\n if print_system_info:\n if \"system_info.txt\" in namelist:\n print(\"== SAVED MODEL SYSTEM INFO ==\")\n print(archive.read(\"system_info.txt\").decode())\n else:\n warnings.warn(\n \"The model was saved with SB3 <= 1.2.0 and thus cannot print system information.\",\n UserWarning,\n )\n\n if \"data\" in namelist and load_data:\n # Load class parameters that are stored\n # with either JSON or pickle (not PyTorch variables).\n json_data = archive.read(\"data\").decode()\n data = json_to_data(json_data, custom_objects=custom_objects)\n\n # Check for all .pth files and load them using th.load.\n # \"pytorch_variables.pth\" stores PyTorch variables, and any other .pth\n # files store state_dicts of variables with custom names (e.g. policy, policy.optimizer)\n pth_files = [file_name for file_name in namelist if os.path.splitext(file_name)[1] == \".pth\"]\n for file_path in pth_files:\n with archive.open(file_path, mode=\"r\") as param_file:\n # File has to be seekable, but param_file is not, so load in BytesIO first\n # fixed in python >= 3.7\n file_content = io.BytesIO()\n file_content.write(param_file.read())\n # go to start of file\n file_content.seek(0)\n # Load the parameters with the right ``map_location``.\n # Remove \".pth\" ending with splitext\n th_object = th.load(file_content, map_location=device)\n # \"tensors.pth\" was renamed \"pytorch_variables.pth\" in v0.9.0, see PR #138\n if file_path == \"pytorch_variables.pth\" or file_path == \"tensors.pth\":\n # PyTorch variables (not state_dicts)\n pytorch_variables = th_object\n else:\n # State dicts. 
Store into params dictionary\n # with same name as in .zip file (without .pth)\n params[os.path.splitext(file_path)[0]] = th_object\n except zipfile.BadZipFile as e:\n # load_path wasn't a zip file\n raise ValueError(f\"Error: the file {load_path} wasn't a zip-file\") from e\n return data, params, pytorch_variables" }, { "identifier": "GymEnv", "path": "stable_baselines3/common/type_aliases.py", "snippet": "class RolloutBufferSamples(NamedTuple):\nclass DictRolloutBufferSamples(NamedTuple):\nclass ReplayBufferSamples(NamedTuple):\nclass DictReplayBufferSamples(NamedTuple):\nclass RolloutReturn(NamedTuple):\nclass TrainFrequencyUnit(Enum):\nclass TrainFreq(NamedTuple):\nclass PolicyPredictor(Protocol):\n STEP = \"step\"\n EPISODE = \"episode\"\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:" }, { "identifier": "get_schedule_fn", "path": "stable_baselines3/common/utils.py", "snippet": "def get_schedule_fn(value_schedule: Union[Schedule, float, int]) -> Schedule:\n \"\"\"\n Transform (if needed) learning rate and clip range (for PPO)\n to callable.\n\n :param value_schedule: Constant value of schedule function\n :return: Schedule function (can return constant value)\n \"\"\"\n # If the passed schedule is a float\n # create a constant function\n if isinstance(value_schedule, (float, int)):\n # Cast to float to avoid errors\n value_schedule = constant_fn(float(value_schedule))\n else:\n assert callable(value_schedule)\n return value_schedule" }, { "identifier": "safe_mean", "path": "stable_baselines3/common/utils.py", "snippet": "def safe_mean(arr: Union[np.ndarray, list, deque]) -> np.ndarray:\n \"\"\"\n Compute the mean of an array if there is at least one element.\n For empty array, return NaN. It is used for logging only.\n\n :param arr: Numpy array or list of values\n :return:\n \"\"\"\n return np.nan if len(arr) == 0 else np.mean(arr)" }, { "identifier": "ARSPolicy", "path": "sb3_contrib/ars/policies.py", "snippet": "class ARSPolicy(BasePolicy):\nclass ARSLinearPolicy(ARSPolicy):\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n with_bias: bool = True,\n squash_output: bool = True,\n ):\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n def forward(self, obs: th.Tensor) -> th.Tensor:\n def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n with_bias: bool = False,\n squash_output: bool = False,\n ):" }, { "identifier": "AsyncEval", "path": "sb3_contrib/common/vec_env/async_eval.py", "snippet": "class AsyncEval:\n \"\"\"\n Helper class to do asynchronous evaluation of different policies with multiple processes.\n It is useful when implementing population based methods like Evolution Strategies (ES),\n Cross Entropy Method (CEM) or Augmented Random Search (ARS).\n\n .. warning::\n\n Only 'forkserver' and 'spawn' start methods are thread-safe,\n which is important to avoid race conditions.\n However, compared to\n 'fork' they incur a small start-up cost and have restrictions on\n global variables. 
With those methods, users must wrap the code in an\n ``if __name__ == \"__main__\":`` block.\n For more information, see the multiprocessing documentation.\n\n :param envs_fn: Vectorized environments to run in subprocesses (callable)\n :param train_policy: The policy object that will load the different candidate\n weights.\n :param start_method: method used to start the subprocesses.\n Must be one of the methods returned by ``multiprocessing.get_all_start_methods()``.\n Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.\n :param n_eval_episodes: The number of episodes to test each agent\n \"\"\"\n\n def __init__(\n self,\n envs_fn: List[Callable[[], VecEnv]],\n train_policy: BasePolicy,\n start_method: Optional[str] = None,\n n_eval_episodes: int = 1,\n ):\n self.waiting = False\n self.closed = False\n n_envs = len(envs_fn)\n\n if start_method is None:\n # Fork is not a thread safe method (see issue #217)\n # but is more user friendly (does not require to wrap the code in\n # a `if __name__ == \"__main__\":`)\n forkserver_available = \"forkserver\" in mp.get_all_start_methods()\n start_method = \"forkserver\" if forkserver_available else \"spawn\"\n ctx = mp.get_context(start_method)\n\n self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])\n self.processes = []\n for work_remote, remote, worker_env in zip(self.work_remotes, self.remotes, envs_fn):\n args = (\n work_remote,\n remote,\n CloudpickleWrapper(worker_env),\n CloudpickleWrapper(train_policy),\n n_eval_episodes,\n )\n # daemon=True: if the main process crashes, we should not cause things to hang\n process = ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error\n process.start()\n self.processes.append(process)\n work_remote.close()\n\n def send_jobs(self, candidate_weights: th.Tensor, pop_size: int) -> None:\n \"\"\"\n Send jobs to the workers to evaluate new candidates.\n\n :param candidate_weights: The weights to be evaluated.\n :pop_size: The number of candidate (size of the population)\n \"\"\"\n jobs_per_worker = defaultdict(list)\n for weights_idx in range(pop_size):\n jobs_per_worker[weights_idx % len(self.remotes)].append((weights_idx, candidate_weights[weights_idx]))\n\n for remote_idx, remote in enumerate(self.remotes):\n remote.send((\"eval\", jobs_per_worker[remote_idx]))\n self.waiting = True\n\n def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:\n \"\"\"\n Seed the environments.\n\n :param seed: The seed for the pseudo-random generators.\n :return:\n \"\"\"\n for idx, remote in enumerate(self.remotes):\n remote.send((\"seed\", seed + idx))\n return [remote.recv() for remote in self.remotes]\n\n def get_results(self) -> List[Tuple[int, Tuple[np.ndarray, np.ndarray]]]:\n \"\"\"\n Retreive episode rewards and lengths from each worker\n for all candidates (there might be multiple candidates per worker)\n\n :return: A list of tuples containing each candidate index and its\n result (episodic reward and episode length)\n \"\"\"\n results = [remote.recv() for remote in self.remotes]\n flat_results = [result for worker_results in results for result in worker_results]\n self.waiting = False\n return flat_results\n\n def get_obs_rms(self) -> List[RunningMeanStd]:\n \"\"\"\n Retrieve the observation filters (observation running mean std)\n of each process, they will be combined in the main process.\n Synchronisation is done afterward using ``sync_obs_rms()``.\n :return: A list of ``RunningMeanStd`` objects (one per process)\n \"\"\"\n 
for remote in self.remotes:\n remote.send((\"get_obs_rms\", None))\n return [remote.recv() for remote in self.remotes]\n\n def sync_obs_rms(self, obs_rms: RunningMeanStd) -> None:\n \"\"\"\n Synchronise (and update) the observation filters\n (observation running mean std)\n :param obs_rms: The updated ``RunningMeanStd`` to be used\n by workers for normalizing observations.\n \"\"\"\n for remote in self.remotes:\n remote.send((\"sync_obs_rms\", obs_rms))\n\n def close(self) -> None:\n \"\"\"\n Close the processes.\n \"\"\"\n if self.closed:\n return\n if self.waiting:\n for remote in self.remotes:\n remote.recv()\n for remote in self.remotes:\n remote.send((\"close\", None))\n for process in self.processes:\n process.join()\n self.closed = True" } ]
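The AsyncEval entry that closes the context list above explains multiprocess evaluation of candidate weights for population-based methods such as ARS. As a minimal, hedged usage sketch: the environment id, budget, and hyperparameters are placeholders, and the `ARS.learn(..., async_eval=...)` entry point is assumed from sb3_contrib's public API (it is not shown in this record).

# One worker per callable; each worker loads candidate weights and reports episodic returns.
from stable_baselines3.common.env_util import make_vec_env
from sb3_contrib import ARS
from sb3_contrib.common.vec_env.async_eval import AsyncEval

if __name__ == "__main__":  # required by the 'spawn'/'forkserver' start methods noted above
    env_id = "CartPole-v1"  # placeholder environment
    model = ARS("MlpPolicy", env_id, n_delta=2, n_top=1, verbose=1)
    async_eval = AsyncEval([lambda: make_vec_env(env_id) for _ in range(2)], model.policy)
    model.learn(total_timesteps=10_000, async_eval=async_eval)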
import copy
import sys
import time
import warnings
import numpy as np
import torch as th
import torch.nn.utils
from functools import partial
from typing import Any, Dict, Optional, Type, TypeVar, Union
from gym import spaces
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.save_util import load_from_zip_file
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_schedule_fn, safe_mean
from sb3_contrib.ars.policies import ARSPolicy, LinearPolicy, MlpPolicy
from sb3_contrib.common.vec_env.async_eval import AsyncEval
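Two of the helpers imported above (`get_schedule_fn`, `safe_mean`) are documented in this record's context entries; a small hedged illustration of their behavior, with arbitrary values:

import numpy as np
from stable_baselines3.common.utils import get_schedule_fn, safe_mean

lr_schedule = get_schedule_fn(0.02)   # a float becomes a constant schedule
assert lr_schedule(0.5) == 0.02       # same value for any progress_remaining
assert np.isnan(safe_mean([]))        # empty buffers yield NaN instead of raising
print(safe_mean([1.0, 2.0, 3.0]))     # -> 2.0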
17,039
:param episode_lengths: List containing per-episode lengths (in number of steps) """ # Mimic Monitor Wrapper infos = [ {"episode": {"r": episode_reward, "l": episode_length}} for episode_reward, episode_length in zip(episode_rewards, episode_lengths) ] self._update_info_buffer(infos) def _trigger_callback( self, _locals: Dict[str, Any], _globals: Dict[str, Any], callback: BaseCallback, n_envs: int, ) -> None: """ Callback passed to the ``evaluate_policy()`` helper in order to increment the number of timesteps and trigger events in the single process version. :param _locals: :param _globals: :param callback: Callback that will be called at every step :param n_envs: Number of environments """ self.num_timesteps += n_envs callback.on_step() def evaluate_candidates( self, candidate_weights: th.Tensor, callback: BaseCallback, async_eval: Optional[AsyncEval] ) -> th.Tensor: """ Evaluate each candidate. :param candidate_weights: The candidate weights to be evaluated. :param callback: Callback that will be called at each step (or after evaluation in the multiprocess version) :param async_eval: The object for asynchronous evaluation of candidates. :return: The episodic return for each candidate. """ batch_steps = 0 # returns == sum of rewards candidate_returns = th.zeros(self.pop_size, device=self.device) train_policy = copy.deepcopy(self.policy) # Empty buffer to show only mean over one iteration (one set of candidates) in the logs self.ep_info_buffer = [] callback.on_rollout_start() if async_eval is not None: # Multiprocess asynchronous version async_eval.send_jobs(candidate_weights, self.pop_size) results = async_eval.get_results() for weights_idx, (episode_rewards, episode_lengths) in results: # Update reward to cancel out alive bonus if needed candidate_returns[weights_idx] = sum(episode_rewards) + self.alive_bonus_offset * sum(episode_lengths) batch_steps += np.sum(episode_lengths) self._mimic_monitor_wrapper(episode_rewards, episode_lengths) # Combine the filter stats of each process for normalization for worker_obs_rms in async_eval.get_obs_rms(): if self._vec_normalize_env is not None: # worker_obs_rms.count -= self.old_count self._vec_normalize_env.obs_rms.combine(worker_obs_rms) # Hack: don't count timesteps twice (between the two are synced) # otherwise it will lead to overflow, # in practice we would need two RunningMeanStats self._vec_normalize_env.obs_rms.count -= self.old_count # Synchronise VecNormalize if needed if self._vec_normalize_env is not None: async_eval.sync_obs_rms(self._vec_normalize_env.obs_rms.copy()) self.old_count = self._vec_normalize_env.obs_rms.count # Hack to have Callback events for _ in range(batch_steps // len(async_eval.remotes)): self.num_timesteps += len(async_eval.remotes) callback.on_step() else: # Single process, synchronous version for weights_idx in range(self.pop_size): # Load current candidate weights train_policy.load_from_vector(candidate_weights[weights_idx].cpu()) # Evaluate the candidate episode_rewards, episode_lengths = evaluate_policy( train_policy, self.env, n_eval_episodes=self.n_eval_episodes, return_episode_rewards=True, # Increment num_timesteps too (slight mismatch with multi envs) callback=partial(self._trigger_callback, callback=callback, n_envs=self.env.num_envs), warn=False, ) # Update reward to cancel out alive bonus if needed candidate_returns[weights_idx] = sum(episode_rewards) + self.alive_bonus_offset * sum(episode_lengths) batch_steps += sum(episode_lengths) self._mimic_monitor_wrapper(episode_rewards, episode_lengths) # 
Note: we increment the num_timesteps inside the evaluate_policy() # however when using multiple environments, there will be a slight # mismatch between the number of timesteps used and the number # of calls to the step() method (cf. implementation of evaluate_policy()) # self.num_timesteps += batch_steps callback.on_rollout_end() return candidate_returns def _log_and_dump(self) -> None: """ Dump information to the logger. """ time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon) fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed) if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
SelfARS = TypeVar("SelfARS", bound="ARS") class ARS(BaseAlgorithm): """ Augmented Random Search: https://arxiv.org/abs/1803.07055 Original implementation: https://github.com/modestyachts/ARS C++/Cuda Implementation: https://github.com/google-research/tiny-differentiable-simulator/ 150 LOC Numpy Implementation: https://github.com/alexis-jacq/numpy_ARS/blob/master/asr.py :param policy: The policy to train, can be an instance of ``ARSPolicy``, or a string from ["LinearPolicy", "MlpPolicy"] :param env: The environment to train on, may be a string if registered with gym :param n_delta: How many random perturbations of the policy to try at each update step. :param n_top: How many of the top delta to use in each update step. Default is n_delta :param learning_rate: Float or schedule for the step size :param delta_std: Float or schedule for the exploration noise :param zero_policy: Boolean determining if the passed policy should have it's weights zeroed before training. :param alive_bonus_offset: Constant added to the reward at each step, used to cancel out alive bonuses. :param n_eval_episodes: Number of episodes to evaluate each candidate. :param policy_kwargs: Keyword arguments to pass to the policy on creation :param tensorboard_log: String with the directory to put tensorboard logs: :param seed: Random seed for the training :param verbose: Verbosity level: 0 no output, 1 info, 2 debug :param device: Torch device to use for training, defaults to "cpu" :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy, "LinearPolicy": LinearPolicy, } def __init__( self, policy: Union[str, Type[ARSPolicy]], env: Union[GymEnv, str], n_delta: int = 8, n_top: Optional[int] = None, learning_rate: Union[float, Schedule] = 0.02, delta_std: Union[float, Schedule] = 0.05, zero_policy: bool = True, alive_bonus_offset: float = 0, n_eval_episodes: int = 1, policy_kwargs: Optional[Dict[str, Any]] = None, tensorboard_log: Optional[str] = None, seed: Optional[int] = None, verbose: int = 0, device: Union[th.device, str] = "cpu", _init_setup_model: bool = True, ): super().__init__( policy, env, learning_rate=learning_rate, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, device=device, supported_action_spaces=(spaces.Box, spaces.Discrete), support_multi_env=True, seed=seed, ) self.n_delta = n_delta self.pop_size = 2 * n_delta self.delta_std_schedule = get_schedule_fn(delta_std) self.n_eval_episodes = n_eval_episodes if n_top is None: n_top = n_delta # Make sure our hyper parameters are valid and auto correct them if they are not if n_top > n_delta: warnings.warn(f"n_top = {n_top} > n_delta = {n_top}, setting n_top = n_delta") n_top = n_delta self.n_top = n_top self.alive_bonus_offset = alive_bonus_offset self.zero_policy = zero_policy self.weights = None # Need to call init model to initialize weight self.processes = None # Keep track of how many steps where elapsed before a new rollout # Important for syncing observation normalization between workers self.old_count = 0 if _init_setup_model: self._setup_model() def _setup_model(self) -> None: self._setup_lr_schedule() self.set_random_seed(self.seed) self.policy = self.policy_class(self.observation_space, self.action_space, **self.policy_kwargs) self.policy = self.policy.to(self.device) self.weights = th.nn.utils.parameters_to_vector(self.policy.parameters()).detach() self.n_params = len(self.weights) if self.zero_policy: 
self.weights = th.zeros_like(self.weights, requires_grad=False) self.policy.load_from_vector(self.weights.cpu()) def _mimic_monitor_wrapper(self, episode_rewards: np.ndarray, episode_lengths: np.ndarray) -> None: """ Helper to mimic Monitor wrapper and report episode statistics (mean reward, mean episode length). :param episode_rewards: List containing per-episode rewards :param episode_lengths: List containing per-episode lengths (in number of steps) """ # Mimic Monitor Wrapper infos = [ {"episode": {"r": episode_reward, "l": episode_length}} for episode_reward, episode_length in zip(episode_rewards, episode_lengths) ] self._update_info_buffer(infos) def _trigger_callback( self, _locals: Dict[str, Any], _globals: Dict[str, Any], callback: BaseCallback, n_envs: int, ) -> None: """ Callback passed to the ``evaluate_policy()`` helper in order to increment the number of timesteps and trigger events in the single process version. :param _locals: :param _globals: :param callback: Callback that will be called at every step :param n_envs: Number of environments """ self.num_timesteps += n_envs callback.on_step() def evaluate_candidates( self, candidate_weights: th.Tensor, callback: BaseCallback, async_eval: Optional[AsyncEval] ) -> th.Tensor: """ Evaluate each candidate. :param candidate_weights: The candidate weights to be evaluated. :param callback: Callback that will be called at each step (or after evaluation in the multiprocess version) :param async_eval: The object for asynchronous evaluation of candidates. :return: The episodic return for each candidate. """ batch_steps = 0 # returns == sum of rewards candidate_returns = th.zeros(self.pop_size, device=self.device) train_policy = copy.deepcopy(self.policy) # Empty buffer to show only mean over one iteration (one set of candidates) in the logs self.ep_info_buffer = [] callback.on_rollout_start() if async_eval is not None: # Multiprocess asynchronous version async_eval.send_jobs(candidate_weights, self.pop_size) results = async_eval.get_results() for weights_idx, (episode_rewards, episode_lengths) in results: # Update reward to cancel out alive bonus if needed candidate_returns[weights_idx] = sum(episode_rewards) + self.alive_bonus_offset * sum(episode_lengths) batch_steps += np.sum(episode_lengths) self._mimic_monitor_wrapper(episode_rewards, episode_lengths) # Combine the filter stats of each process for normalization for worker_obs_rms in async_eval.get_obs_rms(): if self._vec_normalize_env is not None: # worker_obs_rms.count -= self.old_count self._vec_normalize_env.obs_rms.combine(worker_obs_rms) # Hack: don't count timesteps twice (between the two are synced) # otherwise it will lead to overflow, # in practice we would need two RunningMeanStats self._vec_normalize_env.obs_rms.count -= self.old_count # Synchronise VecNormalize if needed if self._vec_normalize_env is not None: async_eval.sync_obs_rms(self._vec_normalize_env.obs_rms.copy()) self.old_count = self._vec_normalize_env.obs_rms.count # Hack to have Callback events for _ in range(batch_steps // len(async_eval.remotes)): self.num_timesteps += len(async_eval.remotes) callback.on_step() else: # Single process, synchronous version for weights_idx in range(self.pop_size): # Load current candidate weights train_policy.load_from_vector(candidate_weights[weights_idx].cpu()) # Evaluate the candidate episode_rewards, episode_lengths = evaluate_policy( train_policy, self.env, n_eval_episodes=self.n_eval_episodes, return_episode_rewards=True, # Increment num_timesteps too (slight mismatch 
with multi envs) callback=partial(self._trigger_callback, callback=callback, n_envs=self.env.num_envs), warn=False, ) # Update reward to cancel out alive bonus if needed candidate_returns[weights_idx] = sum(episode_rewards) + self.alive_bonus_offset * sum(episode_lengths) batch_steps += sum(episode_lengths) self._mimic_monitor_wrapper(episode_rewards, episode_lengths) # Note: we increment the num_timesteps inside the evaluate_policy() # however when using multiple environments, there will be a slight # mismatch between the number of timesteps used and the number # of calls to the step() method (cf. implementation of evaluate_policy()) # self.num_timesteps += batch_steps callback.on_rollout_end() return candidate_returns def _log_and_dump(self) -> None: """ Dump information to the logger. """ time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon) fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed) if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
self.logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]))
7
2023-10-28 01:09:21+00:00
24k
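The single-process branch of `evaluate_candidates` in the record above calls `evaluate_policy(...)` with `return_episode_rewards=True` and sums the per-episode rewards into a candidate return. A minimal hedged sketch of that call pattern follows; the algorithm and environment are placeholders, and any SB3 model exposing `predict()` would work the same way.

from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.evaluation import evaluate_policy

env = make_vec_env("CartPole-v1", n_envs=1)       # placeholder environment
model = PPO("MlpPolicy", env, verbose=0)
episode_rewards, episode_lengths = evaluate_policy(
    model, env, n_eval_episodes=3, return_episode_rewards=True, warn=False
)
candidate_return = sum(episode_rewards)           # ARS aggregates per-episode rewards per candidate
print(candidate_return, sum(episode_lengths))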
pytabular-ai/auto-scikit-dl
utils/model.py
[ { "identifier": "MLP", "path": "models/mlp.py", "snippet": "class MLP(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _MLP(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'mlp'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n \"\"\"MLP config preprocessing\"\"\"\n # process mlp configs\n self.saved_model_config = model_config.copy()\n d_layers = []\n n_layers, first_dim, mid_dim, last_dim = \\\n (\n model_config.pop('n_layers'), model_config.pop('first_dim'),\n model_config.pop('mid_dim'), model_config.pop('last_dim')\n )\n for i in range(n_layers):\n if i == 0:\n d_layers.append(first_dim)\n elif i == n_layers - 1 and n_layers > 1:\n d_layers.append(last_dim)\n else:\n d_layers.append(mid_dim)\n model_config['d_layers'] = d_layers\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time # don't forget backward time, calculate in outer loop\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "FTTransformer", "path": "models/ft_transformer.py", "snippet": "class FTTransformer(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = rtdl.FTTransformer.make_baseline(\n n_num_features=n_num_features,\n cat_cardinalities=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'ft-transformer'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n self.saved_model_config = model_config.copy()\n # process ftt configs\n if 'ffn_d_factor' in model_config:\n model_config['ffn_d_hidden'] = \\\n int(model_config['d_token'] * model_config.pop('ffn_d_factor'))\n return model_config\n \n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time # don't forget backward time, calculate in outer loop\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "AutoInt", "path": "models/autoint.py", "snippet": "class AutoInt(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _AutoInt(\n d_numerical=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'autoint'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args,\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "DCNv2", "path": "models/dcnv2.py", "snippet": "class DCNv2(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _DCNv2(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'dcnv2'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "NODE", "path": "models/node_model.py", "snippet": "class NODE(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _NODE(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n tree_dim=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'node'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "TabModel", "path": "models/abstract.py", "snippet": "class TabModel(ABC):\n def __init__(self):\n self.model: Optional[nn.Module] = None # true model\n self.base_name = None # model type name\n self.device = None\n self.saved_model_config = None\n self.training_config = None\n self.meta_config = None\n self.post_init()\n\n def post_init(self):\n self.history = {\n 'train': {'loss': [], 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0}, \n 'val': {\n 'metric_name': None, 'metric': [], 'best_metric': None, \n 'log_loss': [], 'best_log_loss': None,\n 'best_epoch': None, 'best_step': None,\n 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0\n }, \n # 'test': {'loss': [], 'metric': [], 'final_metric': None},\n 'device': torch.cuda.get_device_name(),\n } # save metrics\n self.no_improvement = 0 # for dnn early stop\n \n def preproc_config(self, model_config: dict):\n \"\"\"default preprocessing for model configurations\"\"\"\n self.saved_model_config = model_config\n return model_config\n \n @abstractmethod\n def fit(\n self,\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n eval_set: Optional[Tuple[Union[torch.Tensor, np.ndarray]]],\n patience: int,\n task: str,\n training_args: dict,\n meta_args: Optional[dict],\n ):\n \"\"\"\n Training Model with Early Stop(optional)\n load best weights at the end\n \"\"\"\n pass\n \n def dnn_fit(\n self,\n *,\n dnn_fit_func: Optional[DNN_FIT_API] = None,\n # API for specical sampler like curriculum learning\n train_loader: Optional[Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal dataloader sampler if is None\n X_num: 
Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None,\n y_std: Optional[float] = None, # for RMSE\n eval_set: Tuple[torch.Tensor, np.ndarray] = None, # similar API as sk-learn\n patience: int = 0, # <= 0 without early stop\n task: str,\n training_args: dict,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_fit)\n if dnn_fit_func is None:\n dnn_fit_func = default_dnn_fit\n # meta args\n if meta_args is None:\n meta_args = {}\n meta_args.setdefault('save_path', f'results/{self.base_name}')\n if not os.path.exists(meta_args['save_path']):\n print('create new results dir: ', meta_args['save_path'])\n os.makedirs(meta_args['save_path'])\n self.meta_config = meta_args\n # optimzier and scheduler\n training_args.setdefault('optimizer', 'adamw')\n optimizer, scheduler = TabModel.make_optimizer(self.model, training_args)\n # data loader\n training_args.setdefault('batch_size', 64)\n training_args.setdefault('ghost_batch_size', None)\n if train_loader is not None:\n train_loader, missing_idx = train_loader\n training_args['batch_size'] = train_loader.batch_size\n else:\n train_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=training_args['batch_size'],\n shuffle=True,\n )\n if eval_set is not None:\n eval_set = eval_set[0] # only use the first dev set\n dev_loader = TabModel.prepare_tensor_loader(\n X_num=eval_set[0], X_cat=eval_set[1], ys=eval_set[2],\n batch_size=training_args['batch_size'],\n )\n else:\n dev_loader = None\n # training loops\n training_args.setdefault('max_epochs', 1000)\n # training_args.setdefault('report_frequency', 100) # same as save_freq\n # training_args.setdefault('save_frequency', 100) # save per 100 steps\n training_args.setdefault('patience', patience)\n training_args.setdefault('save_frequency', 'epoch') # save per epoch\n self.training_config = training_args\n\n steps_per_backward = 1 if training_args['ghost_batch_size'] is None \\\n else training_args['batch_size'] // training_args['ghost_batch_size']\n steps_per_epoch = len(train_loader)\n tot_step, tot_time = 0, 0\n for e in range(training_args['max_epochs']):\n self.model.train()\n tot_loss = 0\n for step, batch in enumerate(train_loader):\n optimizer.zero_grad()\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n logits, forward_time = dnn_fit_func(self.model, x_num, x_cat, y)\n loss = TabModel.compute_loss(logits, y, task)\n # backward\n start_time = time.time()\n loss.backward()\n backward_time = time.time() - start_time\n self.gradient_policy()\n tot_time += forward_time + backward_time\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n # print or save infos\n tot_step += 1\n tot_loss += loss.cpu().item()\n if isinstance(training_args['save_frequency'], int) \\\n and tot_step % training_args['save_frequency'] == 0:\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n if training_args['save_frequency'] == 'epoch':\n if hasattr(self.model, 'layer_masks'):\n print('layer_mask: ', self.model.layer_masks > 0)\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n 
self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n \n @abstractmethod\n def predict(\n self,\n dev_loader: Optional[DataLoader],\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n task: str,\n return_probs: bool = True,\n return_metric: bool = True,\n return_loss: bool = True,\n meta_args: Optional[dict] = None,\n ):\n \"\"\"\n Prediction\n \"\"\"\n pass\n \n def dnn_predict(\n self,\n *,\n dnn_predict_func: Optional[DNN_PREDICT_API] = None,\n dev_loader: Optional[Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None, \n y_std: Optional[float] = None, # for RMSE\n task: str,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_predict)\n if dnn_predict_func is None:\n dnn_predict_func = default_dnn_predict\n if dev_loader is None:\n dev_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=128,\n )\n else:\n dev_loader, missing_idx = dev_loader\n # print(\"Evaluate...\")\n predictions, golds = [], []\n tot_time = 0\n self.model.eval()\n for batch in dev_loader:\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n with torch.no_grad():\n logits, used_time = dnn_predict_func(self.model, x_num, x_cat)\n tot_time += used_time\n predictions.append(logits)\n golds.append(y)\n self.model.train()\n predictions = torch.cat(predictions).squeeze(-1)\n golds = torch.cat(golds)\n if return_loss:\n loss = TabModel.compute_loss(predictions, golds, task).cpu().item()\n else:\n loss = None\n if return_probs and task != 'regression':\n predictions = (\n predictions.sigmoid()\n if task == 'binclass'\n else predictions.softmax(-1)\n )\n prediction_type = 'probs'\n elif task == 'regression':\n prediction_type = None\n else:\n prediction_type = 'logits'\n predictions = predictions.cpu().numpy()\n golds = golds.cpu().numpy()\n if return_metric:\n metric = TabModel.calculate_metric(\n golds, predictions,\n task, prediction_type, y_std\n )\n logloss = (\n log_loss(golds, np.stack([1-predictions, predictions], axis=1), labels=[0,1])\n if task == 'binclass'\n else log_loss(golds, predictions, labels=list(range(len(set(golds)))))\n if task == 'multiclass'\n else None\n )\n else:\n metric, logloss = None, None\n results = {'loss': loss, 'metric': metric, 'time': tot_time, 'log_loss': logloss}\n if meta_args is not None:\n self.save_prediction(meta_args['save_path'], results)\n return predictions, results\n \n def gradient_policy(self):\n \"\"\"For post porcess model gradient\"\"\"\n pass\n \n @abstractmethod\n def save(self, output_dir):\n \"\"\"\n Save model weights and configs,\n the following default save functions\n can be combined to override this function\n \"\"\"\n pass\n\n def save_pt_model(self, output_dir):\n print('saving pt model weights...')\n # save model params\n torch.save(self.model.state_dict(), Path(output_dir) / 'final.bin')\n \n def save_tree_model(self, output_dir):\n print('saving tree model...')\n pass\n\n def save_history(self, output_dir):\n # save metrics\n with open(Path(output_dir) / 'results.json', 'w') as f:\n json.dump(self.history, f, indent=4)\n \n def 
save_prediction(self, output_dir, results, file='prediction'):\n check_dir(output_dir)\n # save test results\n print(\"saving prediction results\")\n saved_results = {\n 'loss': results['loss'], \n 'metric_name': results['metric'][1], \n 'metric': results['metric'][0], \n 'time': results['time'],\n 'log_loss': results['log_loss'],\n }\n with open(Path(output_dir) / f'{file}.json', 'w') as f:\n json.dump(saved_results, f, indent=4)\n \n def save_config(self, output_dir):\n def serialize(config: dict):\n for key in config:\n # serialized object to store yaml or json files \n if any(isinstance(config[key], obj) for obj in [Path, ]):\n config[key] = str(config[key])\n return config\n # save all configs\n with open(Path(output_dir) / 'configs.yaml', 'w') as f:\n configs = {\n 'model': self.saved_model_config, \n 'training': self.training_config,\n 'meta': serialize(self.meta_config)\n }\n yaml.dump(configs, f, indent=2)\n\n @staticmethod\n def make_optimizer(\n model: nn.Module,\n training_args: dict,\n ) -> Tuple[optim.Optimizer, optim.lr_scheduler._LRScheduler]:\n training_args.setdefault('optimizer', 'adamw')\n training_args.setdefault('no_wd_group', None)\n training_args.setdefault('scheduler', None)\n # optimizer\n if training_args['no_wd_group'] is not None:\n assert isinstance(training_args['no_wd_group'], list)\n def needs_wd(name):\n return all(x not in name for x in training_args['no_wd_group'])\n parameters_with_wd = [v for k, v in model.named_parameters() if needs_wd(k)]\n parameters_without_wd = [v for k, v in model.named_parameters() if not needs_wd(k)]\n model_params = [\n {'params': parameters_with_wd},\n {'params': parameters_without_wd, 'weight_decay': 0.0},\n ]\n else:\n model_params = model.parameters()\n optimizer = make_optimizer(\n training_args['optimizer'],\n model_params,\n training_args['lr'],\n training_args['weight_decay'],\n )\n # scheduler\n if training_args['scheduler'] is not None:\n scheduler = None\n else:\n scheduler = None\n\n return optimizer, scheduler\n \n @staticmethod\n def prepare_tensor_loader(\n X_num: Optional[torch.Tensor],\n X_cat: Optional[torch.Tensor],\n ys: torch.Tensor,\n batch_size: int = 64,\n shuffle: bool = False,\n ):\n assert not all(x is None for x in [X_num, X_cat])\n missing_placeholder = 0 if X_num is None else 1 if X_cat is None else -1\n datas = [x for x in [X_num, X_cat, ys] if x is not None]\n tensor_dataset = TensorDataset(*datas)\n tensor_loader = DataLoader(\n tensor_dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n )\n return tensor_loader, missing_placeholder\n \n @staticmethod\n def parse_batch(batch: Tuple[torch.Tensor], missing_idx, device: torch.device):\n if batch[0].device.type != device.type:\n # if batch[0].device != device: # initialize self.device with model.device rather than torch.device()\n # batch = (x.to(device) for x in batch) # generator\n batch = tuple([x.to(device) for x in batch]) # list\n if missing_idx == -1:\n return batch\n else:\n return batch[:missing_idx] + [None,] + batch[missing_idx:]\n \n @staticmethod\n def compute_loss(logits: torch.Tensor, targets: torch.Tensor, task: str, reduction: str = 'mean'):\n loss_fn = {\n 'binclass': F.binary_cross_entropy_with_logits,\n 'multiclass': F.cross_entropy,\n 'regression': F.mse_loss,\n }[task]\n return loss_fn(logits.squeeze(-1), targets, reduction=reduction)\n \n @staticmethod\n def calculate_metric(\n golds,\n predictions,\n task: str,\n prediction_type: Optional[str] = None,\n y_std: Optional[float] = None,\n ):\n \"\"\"Calculate metrics\"\"\"\n 
metric = {\n 'regression': 'rmse', \n 'binclass': 'roc_auc', \n 'multiclass': 'accuracy'\n }[task]\n \n return calculate_metrics(\n golds, predictions,\n task, prediction_type, y_std\n )[metric], metric\n \n def better_result(self, dev_metric, task, is_loss=False):\n if is_loss: # logloss\n best_dev_metric = self.history['val']['best_log_loss']\n if best_dev_metric is None or best_dev_metric > dev_metric:\n self.history['val']['best_log_loss'] = dev_metric\n return True\n else:\n return False\n best_dev_metric = self.history['val']['best_metric']\n if best_dev_metric is None:\n self.history['val']['best_metric'] = dev_metric\n return True\n elif task == 'regression': # rmse\n if best_dev_metric > dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n else:\n if best_dev_metric < dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n \n def early_stop_handler(self, epoch, tot_step, dev_metric, task, patience, save_path):\n if task != 'regression' and self.better_result(dev_metric['log_loss'], task, is_loss=True):\n # record best logloss\n torch.save(self.model.state_dict(), Path(save_path) / 'best-logloss.bin')\n if self.better_result(dev_metric['metric'], task):\n print('<<< Best Dev Result', end='')\n torch.save(self.model.state_dict(), Path(save_path) / 'best.bin')\n self.no_improvement = 0\n self.history['val']['best_epoch'] = epoch\n self.history['val']['best_step'] = tot_step\n else:\n self.no_improvement += 1\n print(f'| [no improvement] {self.no_improvement}', end='')\n if patience <= 0:\n return False\n else:\n return self.no_improvement >= patience\n \n def save_evaluate_dnn(\n self, \n # print and saved infos\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n # evaluate infos\n task, patience, save_path,\n dev_loader, y_std\n ):\n \"\"\"For DNN models\"\"\"\n epoch, step = tot_step // steps_per_epoch, (tot_step - 1) % steps_per_epoch + 1\n avg_loss = tot_loss / step\n self.history['train']['loss'].append(avg_loss)\n self.history['train']['tot_time'] = tot_time\n self.history['train']['avg_step_time'] = tot_time / tot_step\n self.history['train']['avg_epoch_time'] = self.history['train']['avg_step_time'] * steps_per_epoch\n print(f\"[epoch] {epoch} | [step] {step} | [tot_step] {tot_step} | [used time] {tot_time:.4g} | [train_loss] {avg_loss:.4g} \", end='')\n if dev_loader is not None:\n _, results = self.predict(dev_loader=dev_loader, y_std=y_std, task=task, return_metric=True)\n dev_metric, metric_name = results['metric']\n print(f\"| [{metric_name}] {dev_metric:.4g} \", end='')\n if task != 'regression':\n print(f\"| [log-loss] {results['log_loss']:.4g} \", end='')\n self.history['val']['log_loss'].append(results['log_loss'])\n self.history['val']['metric_name'] = metric_name\n self.history['val']['metric'].append(dev_metric)\n self.history['val']['tot_time'] += results['time']\n self.history['val']['avg_step_time'] = self.history['val']['tot_time'] / tot_step\n self.history['val']['avg_epoch_time'] = self.history['val']['avg_step_time'] * steps_per_epoch\n dev_metric = {'metric': dev_metric, 'log_loss': results['log_loss']}\n if self.early_stop_handler(epoch, tot_step, dev_metric, task, patience, save_path):\n print(' <<< Early Stop')\n return True\n print()\n return False\n \n def load_best_dnn(self, save_path, file='best'):\n model_file = Path(save_path) / f\"{file}.bin\"\n if not os.path.exists(model_file):\n print(f'There is no {file} checkpoint, loading the last one...')\n 
model_file = Path(save_path) / 'final.bin'\n else:\n print(f'Loading {file} model...')\n self.model.load_state_dict(torch.load(model_file))\n print('successfully')" }, { "identifier": "check_dir", "path": "models/abstract.py", "snippet": "def check_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)" }, { "identifier": "Dataset", "path": "data/utils.py", "snippet": "class Dataset:\n X_num: Optional[ArrayDict]\n X_cat: Optional[ArrayDict]\n y: ArrayDict\n y_info: Dict[str, Any]\n task_type: TaskType\n n_classes: Optional[int]\n name: Optional[str]\n\n @classmethod\n def from_dir(cls, dir_: Union[Path, str]) -> 'Dataset':\n dir_ = Path(dir_)\n\n def load(item) -> ArrayDict:\n def _load(file: Path):\n return cast(np.ndarray, np.load(file)) if file.exists() else None\n return {\n x: _load(dir_ / f'{item}_{x}.npy')\n for x in ['train', 'val', 'test']\n }\n\n info = load_json(dir_ / 'info.json')\n\n return Dataset(\n load('X_num') if dir_.joinpath('X_num_train.npy').exists() else None,\n load('X_cat') if dir_.joinpath('X_cat_train.npy').exists() else None,\n load('y'),\n {},\n TaskType(info['task_type']),\n info.get('n_classes'),\n info.get('name'),\n )\n\n @property\n def is_binclass(self) -> bool:\n return self.task_type == TaskType.BINCLASS\n\n @property\n def is_multiclass(self) -> bool:\n return self.task_type == TaskType.MULTICLASS\n\n @property\n def is_regression(self) -> bool:\n return self.task_type == TaskType.REGRESSION\n\n @property\n def n_num_features(self) -> int:\n return 0 if self.X_num is None else self.X_num['train'].shape[1]\n\n @property\n def n_cat_features(self) -> int:\n return 0 if self.X_cat is None else self.X_cat['train'].shape[1]\n\n @property\n def n_features(self) -> int:\n return self.n_num_features + self.n_cat_features\n\n def size(self, part: Optional[str]) -> int:\n return sum(map(len, self.y.values())) if part is None else len(self.y[part])\n\n @property\n def nn_output_dim(self) -> int:\n if self.is_multiclass:\n assert self.n_classes is not None\n return self.n_classes\n else:\n return 1\n\n def get_category_sizes(self, part: str) -> List[int]:\n return [] if self.X_cat is None else get_category_sizes(self.X_cat[part])" }, { "identifier": "DataProcessor", "path": "data/processor.py", "snippet": "class DataProcessor:\n \"\"\"Base class to process a single dataset\"\"\"\n def __init__(\n self, \n normalization: Optional[Normalization] = None,\n num_nan_policy: Optional[NumNanPolicy] = None,\n cat_nan_policy: Optional[CatNanPolicy] = None,\n cat_min_frequency: Optional[float] = None,\n cat_encoding: Optional[CatEncoding] = None,\n y_policy: Optional[YPolicy] = 'default',\n seed: int = 42,\n cache_dir: Optional[str] = None,\n ):\n self.transformation = Transformations(\n seed=seed, \n normalization=normalization, \n num_nan_policy=num_nan_policy,\n cat_nan_policy=cat_nan_policy,\n cat_min_frequency=cat_min_frequency,\n cat_encoding=cat_encoding,\n y_policy=y_policy\n )\n self.cache_dir = cache_dir\n \n def apply(self, dataset: Dataset):\n return transform_dataset(dataset, self.transformation, self.cache_dir)\n \n def save(self, file, **kwargs):\n data_config = {\n 'transformation': vars(self.transformation),\n 'cache_dir': str(self.cache_dir),\n 'meta': kwargs,\n }\n with open(file, 'w') as f:\n yaml.dump(data_config, f, indent=2)\n \n @staticmethod\n def check_splits(dataset: Dataset):\n valid_splits = True\n if 'train' in dataset.y:\n if 'test' not in dataset.y:\n warnings.warn(\"Missing test split, unable to prediction\")\n valid_splits = False\n if 
'val' not in dataset.y:\n warnings.warn(\"Missing dev split, unable to early stop, or ignore this message if no early stop needed.\")\n valid_splits = False\n if valid_splits:\n print(\"ready for training!\")\n else:\n raise ValueError(\"Missing training split in the dataset\")\n \n @staticmethod\n def prepare(dataset: Dataset, model: Optional[TabModel] = None, device: str = 'cuda'):\n assert model is not None or device is not None\n def get_spl(X: Optional[Union[ArrayDict, TensorDict]], spl):\n return None if X is None else X[spl]\n if device is not None or isinstance(model.model, nn.Module):\n device = device or model.model.device\n X_num, X_cat, ys = prepare_tensors(dataset, device)\n return {spl: (\n get_spl(X_num, spl), \n get_spl(X_cat, spl), \n get_spl(ys, spl)\n ) for spl in ys}\n else:\n return {spl: (\n get_spl(dataset.X_num, spl), \n get_spl(dataset.X_cat, spl), \n get_spl(dataset.y, spl)\n ) for spl in dataset.y}\n \n @staticmethod\n def load_preproc_default(\n output_dir, # output preprocessing infos\n model_name, \n dataset_name, \n benchmark_name: Optional[str] = None, \n seed: int = 42, \n cache_dir: Optional[str] = None\n ):\n global DATASETS, CUSTOM_DATASETS\n \"\"\"default data preprocessing pipeline\"\"\"\n if dataset_name in DATASETS or dataset_name in CUSTOM_DATASETS:\n data_src = DATASETS if dataset_name in DATASETS else CUSTOM_DATASETS\n data_config = data_src[dataset_name]\n data_path = Path(data_config['path'])\n data_config.setdefault('normalization', 'quantile')\n normalization = data_config['normalization']\n elif benchmark_name is not None:\n assert benchmark_name in BENCHMARKS, f\"Benchmark '{benchmark_name}' is not included, \\\n please choose one of '{list(BENCHMARKS.keys())}', for include your benchmark manually.\"\n benchmark_info = BENCHMARKS[benchmark_name]\n assert dataset_name in benchmark_info['datasets'], f\"dataset '{dataset_name}' not in benchmark '{benchmark_name}'\"\n data_path = Path(benchmark_info['path']) / dataset_name\n normalization = 'quantile'\n else:\n raise ValueError(f\"No dataset '{dataset_name}' is available, \\\n if you want to use a custom dataset (from csv file), using `add_custom_dataset`\")\n \n dataset = Dataset.from_dir(data_path)\n # default preprocess settings\n num_nan_policy = 'mean' if dataset.X_num is not None and \\\n any(np.isnan(dataset.X_num[spl]).any() for spl in dataset.X_num) else None\n cat_nan_policy = None\n if model_name in ['xgboost', 'catboost', 'lightgbm']: # for tree models or other sklearn algorithms\n normalization = None\n cat_min_frequency = None\n cat_encoding = 'one-hot'\n if model_name in ['catboost']:\n cat_encoding = None\n else: # for dnns\n # BUG: (dataset.X_cat[spl] == CAT_MISSING_VALUE).any() has different action\n # dtype: int -> bool, dtype: string -> array[bool], dtype: object -> np.load error\n # CURRENT: uniformly using string type to store catgorical features\n if dataset.X_cat is not None and \\\n any((dataset.X_cat[spl] == CAT_MISSING_VALUE).any() for spl in dataset.X_cat):\n cat_nan_policy = 'most_frequent'\n cat_min_frequency = None\n cat_encoding = None\n cache_dir = cache_dir or data_path\n processor = DataProcessor(\n normalization=normalization,\n num_nan_policy=num_nan_policy,\n cat_nan_policy=cat_nan_policy,\n cat_min_frequency=cat_min_frequency,\n cat_encoding=cat_encoding,\n seed=seed,\n cache_dir=Path(cache_dir),\n )\n dataset = processor.apply(dataset)\n # check train, val, test splits\n DataProcessor.check_splits(dataset)\n # save preprocessing infos\n if not 
os.path.exists(output_dir):\n os.makedirs(output_dir)\n processor.save(\n Path(output_dir) / 'data_config.yaml',\n benchmark=str(benchmark_name),\n dataset=dataset_name\n )\n return dataset\n\n @staticmethod\n def split(\n X_num: Optional[np.ndarray] = None, \n X_cat: Optional[np.ndarray] = None, \n ys: np.ndarray = None, \n train_ratio: float = 0.8,\n stratify: bool = True,\n seed: int = 42,\n ):\n assert 0 < train_ratio < 1\n assert ys is not None\n sample_idx = np.arange(len(ys))\n test_ratio = 1 - train_ratio\n _stratify = None if not stratify else ys\n train_idx, test_idx = train_test_split(sample_idx, test_size=test_ratio, random_state=seed, stratify=_stratify)\n _stratify = None if not stratify else ys[train_idx]\n train_idx, val_idx = train_test_split(train_idx, test_size=test_ratio, random_state=seed, stratify=_stratify)\n if X_num is not None:\n X_num = {'train': X_num[train_idx], 'val': X_num[val_idx], 'test': X_num[test_idx]}\n if X_cat is not None:\n X_cat = {'train': X_cat[train_idx], 'val': X_cat[val_idx], 'test': X_cat[test_idx]}\n ys = {'train': ys[train_idx], 'val': ys[val_idx], 'test': ys[test_idx]}\n idx = {'train': train_idx, 'val': val_idx, 'test': test_idx}\n return X_num, X_cat, ys, idx\n \n @staticmethod\n def del_custom_dataset(\n dataset_names: Union[str, List[str]]\n ):\n global DATASETS, CUSTOM_DATASETS\n all_infos = read_custom_infos()\n if isinstance(dataset_names, str):\n dataset_names = [dataset_names]\n for dataset_name in dataset_names:\n if dataset_name not in CUSTOM_DATASETS:\n print(f\"custom dataset: {dataset_name} not exist\")\n continue\n elif dataset_name in DATASETS:\n print(f\"can not delete an in-built dataset: {dataset_name}\")\n continue\n data_info = CUSTOM_DATASETS[dataset_name]\n task = data_info['task_type']\n data_path = data_info['path']\n data_idx = [info['name'] for info in all_infos['data_list']].index(dataset_name)\n all_infos['data_list'].pop(data_idx)\n all_infos['n_datasets'] -= 1\n all_infos[task] -= 1\n shutil.rmtree(data_path)\n print(f\"delete dataset: {dataset_name} successfully\")\n write_custom_infos(all_infos)\n from .env import CUSTOM_DATASETS # BUG: refresh the global variable\n\n @staticmethod\n def add_custom_dataset(\n file: Union[str, Path],\n format: DataFileType = 'csv',\n dataset_name: Optional[str] = None,\n task: Optional[str] = None,\n num_cols: Optional[List[int]] = None,\n cat_cols: Optional[List[int]] = None,\n label_index: int = -1, # label column index\n header: Optional[int] = 0, # header row\n max_cat_num: int = 16,\n train_ratio: float = 0.8, # split train / test, train / val\n seed: float = 42, # random split seed\n ):\n \"\"\"\n Support for adding a custom dataset from a single data file\n ---\n read a raw csv file, process into 3 splits (train, val, test), and add to custom_datasets\n\n TODO: adding a dataset from prepared data split files \n TODO: support no validation split\n \"\"\"\n global DATASETS, CUSTOM_DATASETS\n file_name = Path(file).name\n assert file_name.endswith(format), f'please check if the file \\\n is in {format} format, or add the suffix manually'\n dataset_name = dataset_name or file_name[:-len(format)-1]\n assert dataset_name not in DATASETS, f'same dataset name as an in-built dataset: {dataset_name}'\n assert dataset_name not in CUSTOM_DATASETS, f\"existing custom dataset '{dataset_name}' found\"\n \n if format == 'csv':\n datas: pd.DataFrame = pd.read_csv(file, header=header)\n columns = datas.columns if header is not None else None\n elif format == 'npy':\n header = None # 
numpy file has no headers\n columns = None\n datas = np.load(file)\n raise NotImplementedError(\"only support load csv file now\")\n else:\n raise ValueError(\"other support format to be add further\")\n \n X_idx = list(range(datas.shape[1]))\n y_idx = X_idx.pop(label_index)\n label_name = columns[y_idx] if columns is not None else None\n # numerical and categorical feature detection\n if num_cols is None or cat_cols is None:\n print('automatically detect column type...')\n print('max category amount: ', max_cat_num)\n num_cols, cat_cols = [], []\n num_names, cat_names = [], []\n for i in X_idx:\n if datas.iloc[:, i].values.dtype == float:\n num_cols.append(i)\n if columns is not None:\n num_names.append(columns[i])\n else: # int or object (str)\n if len(set(datas.iloc[:, i].values)) <= max_cat_num:\n cat_cols.append(i)\n if columns is not None:\n cat_names.append(columns[i])\n elif datas.iloc[:, i].values.dtype == int:\n num_cols.append(i)\n if columns is not None:\n num_names.append(columns[i])\n if not num_names and not cat_names:\n num_names, cat_names = None, None\n elif columns:\n num_names = [columns[i] for i in num_cols]\n cat_names = [columns[i] for i in cat_cols]\n else:\n num_names, cat_names = None, None\n n_num_features = len(num_cols)\n n_cat_features = len(cat_cols)\n # build X_num and X_cat\n X_num, ys = None, datas.iloc[:, y_idx].values\n if len(num_cols) > 0:\n X_num = datas.iloc[:, num_cols].values.astype(np.float32)\n # check data type\n X_cat = []\n for i in cat_cols:\n if datas.iloc[:, i].values.dtype == int:\n x = datas.iloc[:, i].values.astype(np.int64)\n # ordered by value\n # x = OrdinalEncoder(categories=[sorted(list(set(x)))]).fit_transform(x.reshape(-1, 1))\n else: # string object\n x = datas.iloc[:, i].values.astype(object)\n # most_common = [item[0] for item in Counter(x).most_common()]\n # ordered by frequency\n # x = OrdinalEncoder(categories=[most_common]).fit_transform(x.reshape(-1, 1))\n X_cat.append(x.astype(np.str0)) # Encoder Later, compatible with Line 140\n X_cat = np.stack(X_cat, axis=1) if len(X_cat) > 0 else None # if using OrdinalEncoder, np.concatenate\n # detect task type\n def process_non_regression_labels(ys: np.ndarray, task):\n if ys.dtype in [int, float]:\n ys = OrdinalEncoder(categories=[sorted(list(set(ys)))]).fit_transform(ys.reshape(-1, 1))\n else:\n most_common = [item[0] for item in Counter(ys).most_common()]\n ys = OrdinalEncoder(categories=most_common).fit_transform(ys.reshape(-1, 1))\n ys = ys[:, 0]\n return ys.astype(np.float32) if task == 'binclass' else ys.astype(np.int64)\n \n if task is None:\n if ys.dtype in [int, object]:\n task = 'binclass' if len(set(ys)) == 2 else 'multiclass'\n ys = process_non_regression_labels(ys, task)\n elif ys.dtype == float:\n if len(set(ys)) == 2:\n task = 'binclass'\n ys = process_non_regression_labels(ys, task)\n else:\n task = 'regression'\n ys = ys.astype(np.float32)\n else:\n if task == 'regression':\n ys = ys.astype(np.float32)\n else:\n ys = process_non_regression_labels(ys, task)\n\n # split datasets\n stratify = task != 'regression'\n X_num, X_cat, ys, idx = DataProcessor.split(X_num, X_cat, ys, train_ratio, stratify, seed)\n # push to CUSTOM_DATASETS\n data_info = {\n 'name': dataset_name,\n 'id': f'{dataset_name.lower()}--custom',\n 'task_type': task,\n 'label_name': label_name,\n 'n_num_features': n_num_features,\n 'num_feature_names': num_names,\n 'n_cat_features': n_cat_features,\n 'cat_feature_names': cat_names,\n 'test_size': len(ys['test']),\n 'train_size': len(ys['train']),\n 
'val_size': len(ys['val'])}\n push_custom_datasets(X_num, X_cat, ys, idx, data_info)\n from .env import CUSTOM_DATASETS # refresh global variable\n print(f'finish, now you can load your dataset with `load_preproc_default({dataset_name})`')" } ]
import os import time import json import yaml import shutil import random import datetime import numpy as np import torch import optuna from pathlib import Path from typing import Dict, List, Tuple, Union, Optional, Literal from models import MLP, FTTransformer, AutoInt, DCNv2, NODE from models.abstract import TabModel, check_dir from data.utils import Dataset from data.processor import DataProcessor
14917
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE,
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE,
'ft-transformer': FTTransformer, 'saint': None,
1
2023-10-30 14:55:44+00:00
24k
hyperspy/exspy
exspy/tests/signals/test_kramers_kronig_transform.py
[ { "identifier": "VolumePlasmonDrude", "path": "exspy/components/volume_plasmon_drude.py", "snippet": "class VolumePlasmonDrude(hs.model.components1D.Expression):\n r\"\"\"\n Drude volume plasmon energy loss function component, the energy loss\n function is defined as:\n\n .. math::\n\n f(E) = I_0 \\frac{E(\\Delta E_p)E_p^2}{(E^2-E_p^2)^2+(E\\Delta E_p)^2}\n\n ================== ===============\n Variable Parameter\n ================== ===============\n :math:`I_0` intensity\n :math:`E_p` plasmon_energy\n :math:`\\Delta E_p` fwhm\n ================== ===============\n\n Parameters\n ----------\n intensity : float\n plasmon_energy : float\n fwhm : float\n **kwargs\n Extra keyword arguments are passed to the\n :py:class:`hyperspy._components.expression.Expression` component.\n\n Notes\n -----\n Refer to Egerton, R. F., Electron Energy-Loss Spectroscopy in the\n Electron Microscope, 2nd edition, Plenum Press 1996, pp. 154-158\n for details, including original equations.\n \"\"\"\n\n def __init__(\n self,\n intensity=1.0,\n plasmon_energy=15.0,\n fwhm=1.5,\n module=\"numexpr\",\n compute_gradients=False,\n **kwargs,\n ):\n super().__init__(\n expression=\"where(x > 0, intensity * (pe2 * x * fwhm) \\\n / ((x ** 2 - pe2) ** 2 + (x * fwhm) ** 2), 0); \\\n pe2 = plasmon_energy ** 2\",\n name=\"VolumePlasmonDrude\",\n intensity=intensity,\n plasmon_energy=plasmon_energy,\n fwhm=fwhm,\n position=\"plasmon_energy\",\n module=module,\n autodoc=False,\n compute_gradients=compute_gradients,\n linear_parameter_list=[\"intensity\"],\n check_parameter_linearity=False,\n **kwargs,\n )\n\n # Partial derivative with respect to the plasmon energy E_p\n def grad_plasmon_energy(self, x):\n plasmon_energy = self.plasmon_energy.value\n fwhm = self.fwhm.value\n intensity = self.intensity.value\n\n return np.where(\n x > 0,\n 2\n * x\n * fwhm\n * plasmon_energy\n * intensity\n * (\n (x**4 + (x * fwhm) ** 2 - plasmon_energy**4)\n / (\n x**4\n + x**2 * (fwhm**2 - 2 * plasmon_energy**2)\n + plasmon_energy**4\n )\n ** 2\n ),\n 0,\n )\n\n # Partial derivative with respect to the plasmon linewidth delta_E_p\n def grad_fwhm(self, x):\n plasmon_energy = self.plasmon_energy.value\n fwhm = self.fwhm.value\n intensity = self.intensity.value\n\n return np.where(\n x > 0,\n x\n * plasmon_energy\n * intensity\n * (\n (\n x**4\n - x**2 * (2 * plasmon_energy**2 + fwhm**2)\n + plasmon_energy**4\n )\n / (\n x**4\n + x**2 * (fwhm**2 - 2 * plasmon_energy**2)\n + plasmon_energy**4\n )\n ** 2\n ),\n 0,\n )\n\n def grad_intensity(self, x):\n return self.function(x) / self.intensity.value" }, { "identifier": "eels_constant", "path": "exspy/misc/eels/tools.py", "snippet": "def eels_constant(s, zlp, t):\n r\"\"\"Calculate the constant of proportionality (k) in the relationship\n between the EELS signal and the dielectric function.\n dielectric function from a single scattering distribution (SSD) using\n the Kramers-Kronig relations.\n\n .. math::\n\n S(E)=\\frac{I_{0}t}{\\pi a_{0}m_{0}v^{2}}\\ln\\left[1+\\left(\\frac{\\beta}\n {\\theta_{E}}\\right)^{2}\\right]\\Im(\\frac{-1}{\\epsilon(E)})=\n k\\Im(\\frac{-1}{\\epsilon(E)})\n\n\n Parameters\n ----------\n zlp: {number, BaseSignal}\n If the ZLP is the same for all spectra, the intengral of the ZLP\n can be provided as a number. 
Otherwise, if the ZLP intensity is not\n the same for all spectra, it can be provided as i) a Signal\n of the same dimensions as the current signal containing the ZLP\n spectra for each location ii) a Signal of signal dimension 0\n and navigation_dimension equal to the current signal containing the\n integrated ZLP intensity.\n t: {None, number, BaseSignal}\n The sample thickness in nm. If the thickness is the same for all\n spectra it can be given by a number. Otherwise, it can be provided\n as a Signal with signal dimension 0 and navigation_dimension equal\n to the current signal.\n\n Returns\n -------\n k: Signal instance\n\n \"\"\"\n\n # Constants and units\n me = constants.value(\"electron mass energy equivalent in MeV\") * 1e3 # keV\n\n # Mapped parameters\n try:\n e0 = s.metadata.Acquisition_instrument.TEM.beam_energy\n except BaseException:\n raise AttributeError(\n \"Please define the beam energy.\"\n \"You can do this e.g. by using the \"\n \"set_microscope_parameters method\"\n )\n try:\n beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle\n except BaseException:\n raise AttributeError(\n \"Please define the collection semi-angle.\"\n \"You can do this e.g. by using the \"\n \"set_microscope_parameters method\"\n )\n\n axis = s.axes_manager.signal_axes[0]\n eaxis = axis.axis.copy()\n if eaxis[0] == 0:\n # Avoid singularity at E=0\n eaxis[0] = 1e-10\n\n if isinstance(zlp, hyperspy.signal.BaseSignal):\n if zlp.axes_manager.navigation_dimension == s.axes_manager.navigation_dimension:\n if zlp.axes_manager.signal_dimension == 0:\n i0 = zlp.data\n else:\n i0 = zlp.integrate1D(axis.index_in_axes_manager).data\n else:\n raise ValueError(\n \"The ZLP signal dimensions are not \"\n \"compatible with the dimensions of the \"\n \"low-loss signal\"\n )\n # The following prevents errors if the signal is a single spectrum\n if len(i0) != 1:\n i0 = i0.reshape(np.insert(i0.shape, axis.index_in_array, 1))\n elif isinstance(zlp, numbers.Number):\n i0 = zlp\n else:\n raise ValueError(\n \"The zero-loss peak input is not valid, it must be\\\n in the BaseSignal class or a Number.\"\n )\n\n if isinstance(t, hyperspy.signal.BaseSignal):\n if (\n t.axes_manager.navigation_dimension == s.axes_manager.navigation_dimension\n ) and (t.axes_manager.signal_dimension == 0):\n t = t.data\n t = t.reshape(np.insert(t.shape, axis.index_in_array, 1))\n else:\n raise ValueError(\n \"The thickness signal dimensions are not \"\n \"compatible with the dimensions of the \"\n \"low-loss signal\"\n )\n\n # Kinetic definitions\n ke = e0 * (1 + e0 / 2.0 / me) / (1 + e0 / me) ** 2\n tgt = e0 * (2 * me + e0) / (me + e0)\n k = s.__class__(\n data=(t * i0 / (332.5 * ke)) * np.log(1 + (beta * tgt / eaxis) ** 2)\n )\n k.metadata.General.title = \"EELS proportionality constant K\"\n return k" }, { "identifier": "EELSSpectrum", "path": "exspy/signals/eels.py", "snippet": "class EELSSpectrum(Signal1D):\n\n \"\"\"Signal class for EELS spectra.\"\"\"\n\n _signal_type = \"EELS\"\n _alias_signal_types = [\"TEM EELS\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Attributes defaults\n self.subshells = set()\n self.elements = set()\n self.edges = list()\n if hasattr(self.metadata, \"Sample\") and hasattr(\n self.metadata.Sample, \"elements\"\n ):\n self.add_elements(self.metadata.Sample.elements)\n self.axes_manager.signal_axes[0].is_binned = True\n self._edge_markers = {\"names\": [], \"lines\": None, \"texts\": None}\n\n def add_elements(self, elements, 
include_pre_edges=False):\n \"\"\"Declare the elemental composition of the sample.\n\n The ionisation edges of the elements present in the current\n energy range will be added automatically.\n\n Parameters\n ----------\n elements : tuple of strings\n The symbol of the elements. Note this input must always be\n in the form of a tuple. Meaning: add_elements(('C',)) will\n work, while add_elements(('C')) will NOT work.\n include_pre_edges : bool\n If True, the ionization edges with an onset below the lower\n energy limit of the SI will be included\n\n Examples\n --------\n\n >>> s = hs.signals.EELSSpectrum(np.arange(1024))\n >>> s.add_elements(('C', 'O'))\n\n Raises\n ------\n ValueError\n\n \"\"\"\n if not isiterable(elements) or isinstance(elements, str):\n raise ValueError(\n \"Input must be in the form of a tuple. For example, \"\n \"if `s` is the variable containing this EELS spectrum:\\n \"\n \">>> s.add_elements(('C',))\\n\"\n \"See the docstring for more information.\"\n )\n\n for element in elements:\n if isinstance(element, bytes):\n element = element.decode()\n if element in elements_db:\n self.elements.add(element)\n else:\n raise ValueError(\n \"%s is not a valid symbol of a chemical element\" % element\n )\n if not hasattr(self.metadata, \"Sample\"):\n self.metadata.add_node(\"Sample\")\n self.metadata.Sample.elements = list(self.elements)\n if self.elements:\n self.generate_subshells(include_pre_edges)\n\n def generate_subshells(self, include_pre_edges=False):\n \"\"\"Calculate the subshells for the current energy range for the\n elements present in self.elements\n\n Parameters\n ----------\n include_pre_edges : bool\n If True, the ionization edges with an onset below the lower\n energy limit of the SI will be included\n\n \"\"\"\n Eaxis = self.axes_manager.signal_axes[0].axis\n if not include_pre_edges:\n start_energy = Eaxis[0]\n else:\n start_energy = 0.0\n end_energy = Eaxis[-1]\n for element in self.elements:\n e_shells = list()\n for shell in elements_db[element][\"Atomic_properties\"][\"Binding_energies\"]:\n if shell[-1] != \"a\":\n energy = elements_db[element][\"Atomic_properties\"][\n \"Binding_energies\"\n ][shell][\"onset_energy (eV)\"]\n if start_energy <= energy <= end_energy:\n subshell = \"%s_%s\" % (element, shell)\n if subshell not in self.subshells:\n self.subshells.add(\"%s_%s\" % (element, shell))\n e_shells.append(subshell)\n\n def edges_at_energy(\n self,\n energy=\"interactive\",\n width=10,\n only_major=False,\n order=\"closest\",\n display=True,\n toolkit=None,\n ):\n \"\"\"Show EELS edges according to an energy range selected from the\n spectrum or within a provided energy window\n\n Parameters\n ----------\n energy : 'interactive' or float\n If it is 'interactive', a table with edges are shown and it depends\n on the energy range selected in the spectrum. If it is a float, a\n table with edges are shown and it depends on the energy window\n defined by energy +/- (width/2). The default is 'interactive'.\n width : float\n Width of window, in eV, around energy in which to find nearby\n energies, i.e. a value of 10 eV (the default) means to\n search +/- 5 eV. The default is 10.\n only_major : bool\n Whether to show only the major edges. The default is False.\n order : str\n Sort the edges, if 'closest', return in the order of energy\n difference, if 'ascending', return in ascending order, similarly\n for 'descending'. 
The default is 'closest'.\n\n Returns\n -------\n An interactive widget if energy is 'interactive', or a html-format\n table or ASCII table, depends on the environment.\n \"\"\"\n\n if energy == \"interactive\":\n er = EdgesRange(self, interactive=True)\n return er.gui(display=display, toolkit=toolkit)\n else:\n self.print_edges_near_energy(energy, width, only_major, order)\n\n @staticmethod\n def print_edges_near_energy(\n energy=None, width=10, only_major=False, order=\"closest\", edges=None\n ):\n \"\"\"Find and print a table of edges near a given energy that are within\n the given energy window.\n\n Parameters\n ----------\n energy : float\n Energy to search, in eV\n width : float\n Width of window, in eV, around energy in which to find nearby\n energies, i.e. a value of 10 eV (the default) means to\n search +/- 5 eV. The default is 10.\n only_major : bool\n Whether to show only the major edges. The default is False.\n order : str\n Sort the edges, if 'closest', return in the order of energy\n difference, if 'ascending', return in ascending order, similarly\n for 'descending'. The default is 'closest'.\n edges : iterable\n A sequence of edges, if provided, it overrides energy, width,\n only_major and order.\n\n Returns\n -------\n A PrettyText object where its representation is ASCII in terminal and\n html-formatted in Jupyter notebook\n \"\"\"\n\n if edges is None and energy is not None:\n edges = get_edges_near_energy(\n energy=energy, width=width, only_major=only_major, order=order\n )\n elif edges is None and energy is None:\n raise ValueError(\"Either energy or edges should be provided.\")\n\n table = PrettyTable()\n table.field_names = [\"edge\", \"onset energy (eV)\", \"relevance\", \"description\"]\n\n for edge in edges:\n element, shell = edge.split(\"_\")\n shell_dict = elements_db[element][\"Atomic_properties\"][\"Binding_energies\"][\n shell\n ]\n\n onset = shell_dict[\"onset_energy (eV)\"]\n relevance = shell_dict[\"relevance\"]\n threshold = shell_dict[\"threshold\"]\n edge_ = shell_dict[\"edge\"]\n description = threshold + \". \" * (threshold != \"\" and edge_ != \"\") + edge_\n\n table.add_row([edge, onset, relevance, description])\n\n # this ensures the html version try its best to mimick the ASCII one\n table.format = True\n\n display(table)\n\n def estimate_zero_loss_peak_centre(self, mask=None):\n \"\"\"Estimate the position of the zero-loss peak.\n\n This function provides just a coarse estimation of the position\n of the zero-loss peak centre by computing the position of the maximum\n of the spectra. For subpixel accuracy use `estimate_shift1D`.\n\n Parameters\n ----------\n mask : Signal1D of bool data type or bool array\n It must have signal_dimension = 0 and navigation_shape equal to the\n navigation shape of the current signal. Where mask is True the\n shift is not computed and set to nan.\n\n Returns\n -------\n zlpc : Signal1D subclass\n The estimated position of the maximum of the ZLP peak.\n\n Notes\n -----\n This function only works when the zero-loss peak is the most\n intense feature in the spectrum. 
If it is not in most cases\n the spectrum can be cropped to meet this criterion.\n Alternatively use `estimate_shift1D`.\n\n See Also\n --------\n estimate_shift1D, align_zero_loss_peak\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n self._check_navigation_mask(mask)\n if isinstance(mask, BaseSignal):\n mask = mask.data\n zlpc = self.valuemax(-1)\n if mask is not None:\n zlpc.data = np.where(mask, np.nan, zlpc.data)\n zlpc.set_signal_type(\"\")\n title = self.metadata.General.title\n zlpc.metadata.General.title = \"ZLP(%s)\" % title\n return zlpc\n\n def align_zero_loss_peak(\n self,\n calibrate=True,\n also_align=[],\n print_stats=True,\n subpixel=True,\n mask=None,\n signal_range=None,\n show_progressbar=None,\n crop=True,\n **kwargs,\n ):\n \"\"\"Align the zero-loss peak.\n\n This function first aligns the spectra using the result of\n `estimate_zero_loss_peak_centre` which finds the maximum in the\n given energy range, then if subpixel is True,\n proceeds to align with subpixel accuracy using `align1D`. The offset\n is automatically correct if `calibrate` is True.\n\n Parameters\n ----------\n calibrate : bool\n If True, set the offset of the spectral axis so that the\n zero-loss peak is at position zero.\n also_align : list of signals\n A list containing other spectra of identical dimensions to\n align using the shifts applied to the current spectrum.\n If `calibrate` is True, the calibration is also applied to\n the spectra in the list.\n print_stats : bool\n If True, print summary statistics of the ZLP maximum before\n the alignment.\n subpixel : bool\n If True, perform the alignment with subpixel accuracy\n using cross-correlation.\n mask : Signal1D of bool data type or bool array.\n It must have signal_dimension = 0 and navigation_shape equal to\n the shape of the current signal. Where mask is True the shift is\n not computed and set to nan.\n signal_range : tuple of integers, tuple of floats. Optional\n Will only search for the ZLP within the signal_range. If given\n in integers, the range will be in index values. If given floats,\n the range will be in spectrum values. Useful if there are features\n in the spectrum which are more intense than the ZLP.\n Default is searching in the whole signal. Note that ROIs can be used\n in place of a tuple.\n %s\n %s\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Examples\n --------\n >>> s_ll = hs.signals.EELSSpectrum(np.zeros(1000))\n >>> s_ll.data[100] = 100\n >>> s_ll.align_zero_loss_peak()\n\n Aligning both the lowloss signal and another signal\n\n >>> s = hs.signals.EELSSpectrum(np.range(1000))\n >>> s_ll.align_zero_loss_peak(also_align=[s])\n\n Aligning within a narrow range of the lowloss signal\n\n >>> s_ll.align_zero_loss_peak(signal_range=(-10.,10.))\n\n\n See Also\n --------\n estimate_zero_loss_peak_centre, align1D, estimate_shift1D.\n\n Notes\n -----\n Any extra keyword arguments are passed to `align1D`. 
For\n more information read its docstring.\n\n \"\"\"\n\n def substract_from_offset(value, signals):\n # Test that axes is uniform\n if not self.axes_manager[-1].is_uniform:\n raise NotImplementedError(\n \"Support for EELS signals with \"\n \"non-uniform signal axes is not yet implemented.\"\n )\n if isinstance(value, da.Array):\n value = value.compute()\n for signal in signals:\n signal.axes_manager[-1].offset -= value\n signal.events.data_changed.trigger(signal)\n\n def estimate_zero_loss_peak_centre(s, mask, signal_range):\n if signal_range:\n zlpc = s.isig[\n signal_range[0] : signal_range[1]\n ].estimate_zero_loss_peak_centre(mask=mask)\n else:\n zlpc = s.estimate_zero_loss_peak_centre(mask=mask)\n return zlpc\n\n zlpc = estimate_zero_loss_peak_centre(\n self, mask=mask, signal_range=signal_range\n )\n\n mean_ = np.nanmean(zlpc.data)\n\n if print_stats is True:\n print(underline(\"Initial ZLP position statistics\"))\n zlpc.print_summary_statistics()\n\n for signal in also_align + [self]:\n shift_array = -zlpc.data + mean_\n if zlpc._lazy:\n # We must compute right now because otherwise any changes to the\n # axes_manager of the signal later in the workflow may result in\n # a wrong shift_array\n shift_array = shift_array.compute()\n signal.shift1D(shift_array, crop=crop, show_progressbar=show_progressbar)\n\n if calibrate is True:\n zlpc = estimate_zero_loss_peak_centre(\n self, mask=mask, signal_range=signal_range\n )\n substract_from_offset(np.nanmean(zlpc.data), also_align + [self])\n\n if subpixel is False:\n return\n\n start, end = signal_range or (-3.0, 3.0)\n\n if calibrate is False:\n start += mean_\n end += mean_\n\n start = (\n start\n if start > self.axes_manager[-1].axis[0]\n else self.axes_manager[-1].axis[0]\n )\n end = (\n end\n if end < self.axes_manager[-1].axis[-1]\n else self.axes_manager[-1].axis[-1]\n )\n\n if self.axes_manager.navigation_size > 1:\n self.align1D(\n start,\n end,\n also_align=also_align,\n show_progressbar=show_progressbar,\n mask=mask,\n crop=crop,\n **kwargs,\n )\n if calibrate is True:\n zlpc = estimate_zero_loss_peak_centre(\n self, mask=mask, signal_range=signal_range\n )\n substract_from_offset(np.nanmean(zlpc.data), also_align + [self])\n\n align_zero_loss_peak.__doc__ %= (SHOW_PROGRESSBAR_ARG, CROP_PARAMETER_DOC)\n\n def get_zero_loss_peak_mask(self, zero_loss_peak_mask_width=5.0, signal_mask=None):\n \"\"\"Return boolean array with True value at the position of the zero\n loss peak. 
This mask can be used to restrict operation to the signal\n locations not marked as True (masked).\n\n Parameters\n ----------\n zero_loss_peak_mask_width: float\n Width of the zero loss peak mask.\n %s\n\n Returns\n -------\n bool array\n \"\"\"\n zlpc = self.estimate_zero_loss_peak_centre()\n (signal_axis,) = self.axes_manager[self.axes_manager.signal_axes]\n axis = signal_axis.axis\n mini_value = zlpc.data.mean() - zero_loss_peak_mask_width / 2\n maxi_value = zlpc.data.mean() + zero_loss_peak_mask_width / 2\n mask = np.logical_and(mini_value <= axis, axis <= maxi_value)\n if signal_mask is not None:\n signal_mask = np.logical_or(mask, signal_mask)\n else:\n signal_mask = mask\n return signal_mask\n\n get_zero_loss_peak_mask.__doc__ %= SIGNAL_MASK_ARG\n\n def spikes_diagnosis(\n self,\n signal_mask=None,\n navigation_mask=None,\n zero_loss_peak_mask_width=None,\n **kwargs,\n ):\n if zero_loss_peak_mask_width is not None:\n signal_mask = self.get_zero_loss_peak_mask(\n zero_loss_peak_mask_width, signal_mask\n )\n super().spikes_diagnosis(\n signal_mask=signal_mask, navigation_mask=None, **kwargs\n )\n\n spikes_diagnosis.__doc__ = SPIKES_DIAGNOSIS_DOCSTRING % MASK_ZERO_LOSS_PEAK_WIDTH\n\n def spikes_removal_tool(\n self,\n signal_mask=None,\n navigation_mask=None,\n threshold=\"auto\",\n zero_loss_peak_mask_width=None,\n interactive=True,\n display=True,\n toolkit=None,\n ):\n if zero_loss_peak_mask_width is not None:\n axis = self.axes_manager.signal_axes[0].axis\n # check the zero_loss is in the signal\n if (\n axis[0] - zero_loss_peak_mask_width / 2 > 0\n or axis[-1] + zero_loss_peak_mask_width / 2 < 0\n ):\n raise ValueError(\"The zero loss peaks isn't in the energy range.\")\n signal_mask = self.get_zero_loss_peak_mask(\n zero_loss_peak_mask_width, signal_mask\n )\n super().spikes_removal_tool(\n signal_mask=signal_mask,\n navigation_mask=navigation_mask,\n threshold=threshold,\n interactive=interactive,\n display=display,\n toolkit=toolkit,\n )\n\n spikes_removal_tool.__doc__ = SPIKES_REMOVAL_TOOL_DOCSTRING % (\n SIGNAL_MASK_ARG,\n NAVIGATION_MASK_ARG,\n MASK_ZERO_LOSS_PEAK_WIDTH,\n DISPLAY_DT,\n TOOLKIT_DT,\n )\n\n def estimate_elastic_scattering_intensity(self, threshold, show_progressbar=None):\n \"\"\"Rough estimation of the elastic scattering intensity by\n truncation of a EELS low-loss spectrum.\n\n Parameters\n ----------\n threshold : {Signal1D, float, int}\n Truncation energy to estimate the intensity of the elastic\n scattering. The threshold can be provided as a signal of the same\n dimension as the input spectrum navigation space containing the\n threshold value in the energy units. Alternatively a constant\n threshold can be specified in energy/index units by passing\n float/int.\n %s\n\n Returns\n -------\n I0: Signal1D\n The elastic scattering intensity.\n\n See Also\n --------\n estimate_elastic_scattering_threshold\n\n \"\"\"\n # TODO: Write units tests\n self._check_signal_dimension_equals_one()\n\n if show_progressbar is None:\n show_progressbar = hs.preferences.General.show_progressbar\n\n if isinstance(threshold, numbers.Number):\n I0 = self.isig[:threshold].integrate1D(-1)\n else:\n ax = self.axes_manager.signal_axes[0]\n # I0 = self._get_navigation_signal()\n # I0 = I0.transpose(signal_axes=[])\n threshold = threshold.transpose(signal_axes=[])\n binned = ax.is_binned\n\n def estimating_function(data, threshold=None):\n if np.isnan(threshold):\n return np.nan\n else:\n # the object is just an array, so have to reimplement\n # integrate1D. 
However can make certain assumptions, for\n # example 1D signal and pretty much always binned. Should\n # probably at some point be joint\n ind = ax.value2index(threshold)\n data = data[:ind]\n if binned:\n return data.sum()\n else:\n from scipy.integrate import simps\n\n axis = ax.axis[:ind]\n return simps(y=data, x=axis)\n\n I0 = self.map(\n estimating_function,\n threshold=threshold,\n ragged=False,\n show_progressbar=show_progressbar,\n inplace=False,\n )\n I0.metadata.General.title = self.metadata.General.title + \" elastic intensity\"\n I0.set_signal_type(\"\")\n if self.tmp_parameters.has_item(\"filename\"):\n I0.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_elastic_intensity\"\n )\n I0.tmp_parameters.folder = self.tmp_parameters.folder\n I0.tmp_parameters.extension = self.tmp_parameters.extension\n return I0\n\n estimate_elastic_scattering_intensity.__doc__ %= SHOW_PROGRESSBAR_ARG\n\n def estimate_elastic_scattering_threshold(\n self, window=10.0, tol=None, window_length=5, polynomial_order=3, start=1.0\n ):\n \"\"\"Calculate the first inflexion point of the spectrum derivative\n within a window.\n\n This method assumes that the zero-loss peak is located at position zero\n in all the spectra. Currently it looks for an inflexion point, that can\n be a local maximum or minimum. Therefore, to estimate the elastic\n scattering threshold `start` + `window` must be less than the first\n maximum for all spectra (often the bulk plasmon maximum). If there is\n more than one inflexion point in energy the window it selects the\n smoother one what, often, but not always, is a good choice in this\n case.\n\n Parameters\n ----------\n window : {None, float}\n If None, the search for the local inflexion point is performed\n using the full energy range. A positive float will restrict\n the search to the (0,window] energy window, where window is given\n in the axis units. If no inflexion point is found in this\n spectral range the window value is returned instead.\n tol : {None, float}\n The threshold tolerance for the derivative. If \"auto\" it is\n automatically calculated as the minimum value that guarantees\n finding an inflexion point in all the spectra in given energy\n range.\n window_length : int\n If non zero performs order three Savitzky-Golay smoothing\n to the data to avoid falling in local minima caused by\n the noise. It must be an odd integer.\n polynomial_order : int\n Savitzky-Golay filter polynomial order.\n start : float\n Position from the zero-loss peak centre from where to start\n looking for the inflexion point.\n\n\n Returns\n -------\n\n threshold : Signal1D\n A Signal1D of the same dimension as the input spectrum\n navigation space containing the estimated threshold. Where the\n threshold couldn't be estimated the value is set to nan.\n\n See Also\n --------\n\n estimate_elastic_scattering_intensity,align_zero_loss_peak,\n find_peaks1D_ohaver, fourier_ratio_deconvolution.\n\n Notes\n -----\n\n The main purpose of this method is to be used as input for\n `estimate_elastic_scattering_intensity`. Indeed, for currently\n achievable energy resolutions, there is not such a thing as a elastic\n scattering threshold. 
Therefore, please be aware of the limitations of\n this method when using it.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n # Create threshold with the same shape as the navigation dims.\n threshold = self._get_navigation_signal().transpose(signal_axes=0)\n\n # Progress Bar\n axis = self.axes_manager.signal_axes[0]\n min_index, max_index = axis.value_range_to_indices(start, start + window)\n if max_index < min_index + 10:\n raise ValueError(\"Please select a bigger window\")\n s = self.isig[min_index:max_index].deepcopy()\n if window_length:\n s.smooth_savitzky_golay(\n polynomial_order=polynomial_order,\n window_length=window_length,\n differential_order=1,\n )\n else:\n s = s.derivative(-1)\n if tol is None:\n tol = np.max(abs(s.data).min(axis.index_in_array))\n saxis = s.axes_manager[-1]\n inflexion = (abs(s.data) <= tol).argmax(saxis.index_in_array)\n if isinstance(inflexion, da.Array):\n inflexion = inflexion.compute()\n threshold.data[:] = saxis.index2value(inflexion)\n if isinstance(inflexion, np.ndarray):\n threshold.data[inflexion == 0] = np.nan\n else: # Single spectrum\n if inflexion == 0:\n threshold.data[:] = np.nan\n del s\n if np.isnan(threshold.data).any():\n _logger.warning(\n \"No inflexion point could be found in some positions \"\n \"that have been marked with nans.\"\n )\n # Create spectrum image, stop and return value\n threshold.metadata.General.title = (\n self.metadata.General.title + \" elastic scattering threshold\"\n )\n if self.tmp_parameters.has_item(\"filename\"):\n threshold.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_elastic_scattering_threshold\"\n )\n threshold.tmp_parameters.folder = self.tmp_parameters.folder\n threshold.tmp_parameters.extension = self.tmp_parameters.extension\n threshold.set_signal_type(\"\")\n return threshold\n\n def estimate_thickness(\n self,\n threshold=None,\n zlp=None,\n density=None,\n mean_free_path=None,\n ):\n \"\"\"Estimates the thickness (relative and absolute)\n of a sample using the log-ratio method.\n\n The current EELS spectrum must be a low-loss spectrum containing\n the zero-loss peak. The hyperspectrum must be well calibrated\n and aligned. To obtain the thickness relative to the mean free path\n don't set the `density` and the `mean_free_path`.\n\n Parameters\n ----------\n threshold : {BaseSignal, float}, optional\n If the zero-loss-peak is not provided, use this energy threshold\n to roughly estimate its intensity by truncation.\n If the threshold is constant across the dataset use a float. Otherwise,\n provide a signal of\n the same dimension as the input spectrum navigation space\n containing the threshold value in the energy units.\n zlp : BaseSignal, optional\n If not None the zero-loss peak intensity is calculated from the ZLP\n spectrum supplied by integration.\n mean_free_path : float, optional\n The mean free path of the material in nanometers.\n If not provided, the thickness\n is given relative to the mean free path.\n density : float, optional\n The density of the material in g/cm**3. This is used to estimate the mean\n free path when the mean free path is not known and to perform the\n angular corrections.\n\n Returns\n -------\n s : BaseSignal\n The thickness relative to the MFP. It returns a Signal1D,\n Signal2D or a BaseSignal, depending on the current navigation\n dimensions.\n\n Notes\n -----\n For details see Egerton, R. Electron Energy-Loss Spectroscopy in the Electron\n Microscope. 
Springer-Verlag, 2011.\n \"\"\"\n axis = self.axes_manager.signal_axes[0]\n total_intensity = self.integrate1D(axis.index_in_array).data\n if threshold is None and zlp is None:\n raise ValueError(\n \"Please provide one of the following keywords: \" \"`threshold`, `zlp`\"\n )\n if zlp is not None:\n I0 = zlp.integrate1D(axis.index_in_array).data\n else:\n I0 = self.estimate_elastic_scattering_intensity(\n threshold=threshold,\n ).data\n\n t_over_lambda = np.log(total_intensity / I0)\n\n if density is not None:\n if self._are_microscope_parameters_missing():\n raise RuntimeError(\n \"Some microscope parameters are missing. Please use the \"\n \"`set_microscope_parameters()` method to set them. \"\n \"If you don't know them, don't set the `density` keyword.\"\n )\n else:\n md = self.metadata.Acquisition_instrument.TEM\n t_over_lambda *= iMFP_angular_correction(\n beam_energy=md.beam_energy,\n alpha=md.convergence_angle,\n beta=md.Detector.EELS.collection_angle,\n density=density,\n )\n if mean_free_path is None:\n mean_free_path = iMFP_Iakoubovskii(\n electron_energy=self.metadata.Acquisition_instrument.TEM.beam_energy,\n density=density,\n )\n _logger.info(f\"The estimated iMFP is {mean_free_path} nm\")\n else:\n _logger.warning(\n \"Computing the thickness without taking into account the effect of \"\n \"the limited collection angle, what usually leads to underestimating \"\n \"the thickness. To perform the angular corrections you must provide \"\n \"the density of the material.\"\n )\n\n s = self._get_navigation_signal(data=t_over_lambda)\n if mean_free_path is not None:\n s.data *= mean_free_path\n s.metadata.General.title = self.metadata.General.title + \" thickness (nm)\"\n s.metadata.Signal.quantity = \"thickness (nm)\"\n else:\n _logger.warning(\n \"Computing the relative thickness. To compute the absolute \"\n \"thickness provide the `mean_free_path` and/or the `density`\"\n )\n s.metadata.General.title = (\n self.metadata.General.title + \" $\\\\frac{t}{\\\\lambda}$\"\n )\n s.metadata.Signal.quantity = \"$\\\\frac{t}{\\\\lambda}$\"\n if self.tmp_parameters.has_item(\"filename\"):\n s.tmp_parameters.filename = self.tmp_parameters.filename + \"_thickness\"\n s.tmp_parameters.folder = self.tmp_parameters.folder\n s.tmp_parameters.extension = self.tmp_parameters.extension\n s = s.transpose(signal_axes=[])\n s.set_signal_type(\"\")\n return s\n\n def fourier_log_deconvolution(self, zlp, add_zlp=False, crop=False):\n \"\"\"Performs fourier-log deconvolution.\n\n Parameters\n ----------\n zlp : EELSSpectrum\n The corresponding zero-loss peak.\n\n add_zlp : bool\n If True, adds the ZLP to the deconvolved spectrum\n crop : bool\n If True crop the spectrum to leave out the channels that\n have been modified to decay smoothly to zero at the sides\n of the spectrum.\n\n Returns\n -------\n An EELSSpectrum containing the current data deconvolved.\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n For details see: Egerton, R. Electron Energy-Loss\n Spectroscopy in the Electron Microscope. 
Springer-Verlag, 2011.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes\"\n )\n s = self.deepcopy()\n zlp_size = zlp.axes_manager.signal_axes[0].size\n self_size = self.axes_manager.signal_axes[0].size\n tapped_channels = s.hanning_taper()\n # Conservative new size to solve the wrap-around problem\n size = zlp_size + self_size - 1\n # Calculate optimal FFT padding for performance\n complex_result = zlp.data.dtype.kind == \"c\" or s.data.dtype.kind == \"c\"\n size = optimal_fft_size(size, not complex_result)\n\n axis = self.axes_manager.signal_axes[0]\n\n z = np.fft.rfft(zlp.data, n=size, axis=axis.index_in_array)\n j = np.fft.rfft(s.data, n=size, axis=axis.index_in_array)\n if self._lazy or zlp._lazy:\n j1 = z * da.log(j / z).map_blocks(np.nan_to_num)\n else:\n j1 = z * np.nan_to_num(np.log(j / z))\n sdata = np.fft.irfft(j1, axis=axis.index_in_array)\n\n s.data = sdata[\n s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, self_size)),\n ]\n )\n ]\n if add_zlp is True:\n if self_size >= zlp_size:\n if self._lazy:\n _slices_before = s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, zlp_size)),\n ]\n )\n _slices_after = s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(zlp_size, None)),\n ]\n )\n s.data = da.stack(\n (s.data[_slices_before] + zlp.data, s.data[_slices_after]),\n axis=axis.index_in_array,\n )\n else:\n s.data[\n s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, zlp_size)),\n ]\n )\n ] += zlp.data\n else:\n s.data += zlp.data[\n s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, self_size)),\n ]\n )\n ]\n\n s.metadata.General.title = (\n s.metadata.General.title + \" after Fourier-log deconvolution\"\n )\n if s.tmp_parameters.has_item(\"filename\"):\n s.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_after_fourier_log_deconvolution\"\n )\n if crop is True:\n s.crop(axis.index_in_axes_manager, None, int(-tapped_channels))\n return s\n\n def fourier_ratio_deconvolution(\n self,\n ll,\n fwhm=None,\n threshold=None,\n extrapolate_lowloss=True,\n extrapolate_coreloss=True,\n ):\n \"\"\"Performs Fourier-ratio deconvolution.\n\n The core-loss should have the background removed. To reduce the noise\n amplification the result is convolved with a Gaussian function.\n\n Parameters\n ----------\n ll: EELSSpectrum\n The corresponding low-loss (ll) EELSSpectrum.\n fwhm : float or None\n Full-width half-maximum of the Gaussian function by which\n the result of the deconvolution is convolved. It can be\n used to select the final SNR and spectral resolution. If\n None, the FWHM of the zero-loss peak of the low-loss is\n estimated and used.\n threshold : {None, float}\n Truncation energy to estimate the intensity of the\n elastic scattering. If None the threshold is taken as the\n first minimum after the ZLP centre.\n extrapolate_lowloss, extrapolate_coreloss : bool\n If True the signals are extrapolated using a power law,\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n For details see: Egerton, R. Electron Energy-Loss\n Spectroscopy in the Electron Microscope. 
Springer-Verlag, 2011.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes.\"\n )\n if not ll.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"The low-loss energy axis is non-uniform. \"\n \"This operation is not yet implemented for non-uniform energy axes\"\n )\n orig_cl_size = self.axes_manager.signal_axes[0].size\n\n if threshold is None:\n threshold = ll.estimate_elastic_scattering_threshold()\n\n if extrapolate_coreloss is True:\n cl = self.power_law_extrapolation(window_size=20, extrapolation_size=100)\n else:\n cl = self.deepcopy()\n\n if extrapolate_lowloss is True:\n ll = ll.power_law_extrapolation(window_size=100, extrapolation_size=100)\n else:\n ll = ll.deepcopy()\n\n ll.hanning_taper()\n cl.hanning_taper()\n\n ll_size = ll.axes_manager.signal_axes[0].size\n cl_size = self.axes_manager.signal_axes[0].size\n # Conservative new size to solve the wrap-around problem\n size = ll_size + cl_size - 1\n # Calculate the optimal FFT size\n size = optimal_fft_size(size)\n\n axis = ll.axes_manager.signal_axes[0]\n if fwhm is None:\n fwhm = float(\n ll.get_current_signal().estimate_peak_width()._get_current_data()\n )\n _logger.info(\"FWHM = %1.2f\" % fwhm)\n\n I0 = ll.estimate_elastic_scattering_intensity(threshold=threshold)\n I0 = I0.data\n if ll.axes_manager.navigation_size > 0:\n I0_shape = list(I0.shape)\n I0_shape.insert(axis.index_in_array, 1)\n I0 = I0.reshape(I0_shape)\n\n from hyperspy.components1d import Gaussian\n\n g = Gaussian()\n g.sigma.value = fwhm / 2.3548\n g.A.value = 1\n g.centre.value = 0\n zl = g.function(\n np.linspace(axis.offset, axis.offset + axis.scale * (size - 1), size)\n )\n z = np.fft.rfft(zl)\n jk = np.fft.rfft(cl.data, n=size, axis=axis.index_in_array)\n jl = np.fft.rfft(ll.data, n=size, axis=axis.index_in_array)\n zshape = [\n 1,\n ] * len(cl.data.shape)\n zshape[axis.index_in_array] = jk.shape[axis.index_in_array]\n cl.data = np.fft.irfft(z.reshape(zshape) * jk / jl, axis=axis.index_in_array)\n cl.data *= I0\n cl.crop(-1, None, int(orig_cl_size))\n cl.metadata.General.title = (\n self.metadata.General.title + \" after Fourier-ratio deconvolution\"\n )\n if cl.tmp_parameters.has_item(\"filename\"):\n cl.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"after_fourier_ratio_deconvolution\"\n )\n return cl\n\n def richardson_lucy_deconvolution(\n self, psf, iterations=15, show_progressbar=None, num_workers=None\n ):\n \"\"\"1D Richardson-Lucy Poissonian deconvolution of\n the spectrum by the given kernel.\n\n Parameters\n ----------\n psf : EELSSpectrum\n It must have the same signal dimension as the current\n spectrum and a spatial dimension of 0 or the same as the\n current spectrum.\n iterations : int\n Number of iterations of the deconvolution. Note that\n increasing the value will increase the noise amplification.\n %s\n %s\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n For details on the algorithm see Gloter, A., A. Douiri,\n M. Tence, and C. Colliex. “Improving Energy Resolution of\n EELS Spectra: An Alternative to the Monochromator Solution.”\n Ultramicroscopy 96, no. 
3–4 (September 2003): 385–400.\n\n \"\"\"\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes.\"\n )\n if show_progressbar is None:\n show_progressbar = hs.preferences.General.show_progressbar\n self._check_signal_dimension_equals_one()\n psf_size = psf.axes_manager.signal_axes[0].size\n maxval = self.axes_manager.navigation_size\n show_progressbar = show_progressbar and (maxval > 0)\n\n def deconv_function(signal, kernel=None, iterations=15, psf_size=None):\n imax = kernel.argmax()\n result = np.array(signal).copy()\n mimax = psf_size - 1 - imax\n for _ in range(iterations):\n first = np.convolve(kernel, result)[imax : imax + psf_size]\n result *= np.convolve(kernel[::-1], signal / first)[\n mimax : mimax + psf_size\n ]\n return result\n\n ds = self.map(\n deconv_function,\n kernel=psf,\n iterations=iterations,\n psf_size=psf_size,\n show_progressbar=show_progressbar,\n num_workers=num_workers,\n ragged=False,\n inplace=False,\n )\n\n ds.metadata.General.title += (\n \" after Richardson-Lucy deconvolution %i iterations\" % iterations\n )\n if ds.tmp_parameters.has_item(\"filename\"):\n ds.tmp_parameters.filename += \"_after_R-L_deconvolution_%iiter\" % iterations\n return ds\n\n richardson_lucy_deconvolution.__doc__ %= (SHOW_PROGRESSBAR_ARG, NUM_WORKERS_ARG)\n\n def _are_microscope_parameters_missing(self, ignore_parameters=[]):\n \"\"\"\n Check if the EELS parameters necessary to calculate the GOS\n are defined in metadata. If not, in interactive mode\n raises an UI item to fill the values.\n The `ignore_parameters` list can be to ignore parameters.\n \"\"\"\n must_exist = (\n \"Acquisition_instrument.TEM.convergence_angle\",\n \"Acquisition_instrument.TEM.beam_energy\",\n \"Acquisition_instrument.TEM.Detector.EELS.collection_angle\",\n )\n missing_parameters = []\n for item in must_exist:\n exists = self.metadata.has_item(item)\n if exists is False and item.split(\".\")[-1] not in ignore_parameters:\n missing_parameters.append(item)\n if missing_parameters:\n _logger.info(\"Missing parameters {}\".format(missing_parameters))\n return True\n else:\n return False\n\n def set_microscope_parameters(\n self,\n beam_energy=None,\n convergence_angle=None,\n collection_angle=None,\n toolkit=None,\n display=True,\n ):\n if set((beam_energy, convergence_angle, collection_angle)) == {None}:\n tem_par = EELSTEMParametersUI(self)\n return tem_par.gui(toolkit=toolkit, display=display)\n mp = self.metadata\n if beam_energy is not None:\n mp.set_item(\"Acquisition_instrument.TEM.beam_energy\", beam_energy)\n if convergence_angle is not None:\n mp.set_item(\n \"Acquisition_instrument.TEM.convergence_angle\", convergence_angle\n )\n if collection_angle is not None:\n mp.set_item(\n \"Acquisition_instrument.TEM.Detector.EELS.collection_angle\",\n collection_angle,\n )\n\n set_microscope_parameters.__doc__ = \"\"\"\n Set the microscope parameters that are necessary to calculate\n the GOS.\n\n If not all of them are defined, in interactive mode\n raises an UI item to fill the values.\n\n beam_energy: float\n The energy of the electron beam in keV.\n convergence_angle : float\n The microscope convergence semi-angle in mrad.\n collection_angle : float\n The collection semi-angle in mrad.\n {}\n {}\n \"\"\".format(\n TOOLKIT_DT, DISPLAY_DT\n )\n\n def power_law_extrapolation(\n self, window_size=20, extrapolation_size=1024, add_noise=False, fix_neg_r=False\n ):\n \"\"\"\n Extrapolate the spectrum to the right 
using a powerlaw.\n\n Parameters\n ----------\n window_size : int\n The number of channels from the right side of the\n spectrum that are used to estimate the power law\n parameters.\n extrapolation_size : int\n Size of the extrapolation in number of channels\n add_noise : bool\n If True, add poissonian noise to the extrapolated spectrum.\n fix_neg_r : bool\n If True, the negative values for the \"components.PowerLaw\"\n parameter r will be flagged and the extrapolation will be\n done with a constant zero-value.\n\n Returns\n -------\n A new spectrum, with the extrapolation.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n axis = self.axes_manager.signal_axes[0]\n s = self.deepcopy()\n s.metadata.General.title += \" %i channels extrapolated\" % extrapolation_size\n if s.tmp_parameters.has_item(\"filename\"):\n s.tmp_parameters.filename += (\n \"_%i_channels_extrapolated\" % extrapolation_size\n )\n new_shape = list(self.data.shape)\n new_shape[axis.index_in_array] += extrapolation_size\n if self._lazy:\n left_data = s.data\n right_shape = list(self.data.shape)\n right_shape[axis.index_in_array] = extrapolation_size\n right_chunks = list(self.data.chunks)\n right_chunks[axis.index_in_array] = (extrapolation_size,)\n right_data = da.zeros(\n shape=tuple(right_shape),\n chunks=tuple(right_chunks),\n dtype=self.data.dtype,\n )\n s.data = da.concatenate([left_data, right_data], axis=axis.index_in_array)\n else:\n # just old code\n s.data = np.zeros(new_shape)\n s.data[..., : axis.size] = self.data\n s.get_dimensions_from_data()\n pl = PowerLaw()\n pl._axes_manager = self.axes_manager\n A, r = pl.estimate_parameters(\n s,\n axis.index2value(axis.size - window_size),\n axis.index2value(axis.size - 1),\n out=True,\n )\n if fix_neg_r is True:\n A = np.where(r <= 0, 0, A)\n # If the signal is binned we need to bin the extrapolated power law\n # what, in a first approximation, can be done by multiplying by the\n # axis step size.\n if self.axes_manager[-1].is_binned:\n factor = s.axes_manager[-1].scale\n else:\n factor = 1\n if self._lazy:\n # only need new axes if the navigation dimension is not 0\n if s.axes_manager.navigation_dimension:\n rightslice = (..., None)\n axisslice = (None, slice(axis.size, None))\n else:\n rightslice = (...,)\n axisslice = (slice(axis.size, None),)\n right_chunks[axis.index_in_array] = 1\n x = da.from_array(\n s.axes_manager.signal_axes[0].axis[axisslice],\n chunks=(extrapolation_size,),\n )\n A = A[rightslice]\n r = r[rightslice]\n right_data = factor * A * x ** (-r)\n s.data = da.concatenate([left_data, right_data], axis=axis.index_in_array)\n else:\n s.data[..., axis.size :] = (\n factor\n * A[..., np.newaxis]\n * s.axes_manager.signal_axes[0].axis[np.newaxis, axis.size :]\n ** (-r[..., np.newaxis])\n )\n return s\n\n def kramers_kronig_analysis(\n self, zlp=None, iterations=1, n=None, t=None, delta=0.5, full_output=False\n ):\n r\"\"\"\n Calculate the complex dielectric function from a single scattering\n distribution (SSD) using the Kramers-Kronig relations.\n\n It uses the FFT method as in [1]_. The SSD is an\n EELSSpectrum instance containing SSD low-loss EELS with no zero-loss\n peak. The internal loop is devised to approximately subtract the\n surface plasmon contribution supposing an unoxidized planar surface and\n neglecting coupling between the surfaces. 
This method does not account\n for retardation effects, instrumental broadening and surface plasmon\n excitation in particles.\n\n Note that either refractive index or thickness are required.\n If both are None or if both are provided an exception is raised.\n\n Parameters\n ----------\n zlp : {None, number, Signal1D}\n ZLP intensity. It is optional (can be None) if `t` is None and `n`\n is not None and the thickness estimation is not required. If `t`\n is not None, the ZLP is required to perform the normalization and\n if `t` is not None, the ZLP is required to calculate the thickness.\n If the ZLP is the same for all spectra, the integral of the ZLP\n can be provided as a number. Otherwise, if the ZLP intensity is not\n the same for all spectra, it can be provided as i) a Signal1D\n of the same dimensions as the current signal containing the ZLP\n spectra for each location ii) a BaseSignal of signal dimension 0\n and navigation_dimension equal to the current signal containing the\n integrated ZLP intensity.\n iterations : int\n Number of the iterations for the internal loop to remove the\n surface plasmon contribution. If 1 the surface plasmon contribution\n is not estimated and subtracted (the default is 1).\n n : {None, float}\n The medium refractive index. Used for normalization of the\n SSD to obtain the energy loss function. If given the thickness\n is estimated and returned. It is only required when `t` is None.\n t : {None, number, Signal1D}\n The sample thickness in nm. Used for normalization of the SSD\n to obtain the energy loss function. It is only required when\n `n` is None. If the thickness is the same for all spectra it can be\n given by a number. Otherwise, it can be provided as a BaseSignal\n with signal dimension 0 and navigation_dimension equal to the\n current signal.\n delta : float\n A small number (0.1-0.5 eV) added to the energy axis in\n specific steps of the calculation the surface loss correction to\n improve stability.\n full_output : bool\n If True, return a dictionary that contains the estimated\n thickness if `t` is None and the estimated surface plasmon\n excitation and the spectrum corrected from surface plasmon\n excitations if `iterations` > 1.\n\n Returns\n -------\n eps: DielectricFunction instance\n The complex dielectric function results,\n\n .. math::\n \\epsilon = \\epsilon_1 + i*\\epsilon_2,\n\n contained in an DielectricFunction instance.\n output: Dictionary (optional)\n A dictionary of optional outputs with the following keys\n\n * ``thickness``: the estimated thickness in nm calculated by\n normalization of the SSD (only when ``t`` is None)\n * ``surface plasmon estimation``: the estimated surface plasmon\n excitation (only if ``iterations`` > 1.)\n\n Raises\n ------\n ValueError\n If both `n` and `t` are undefined (None).\n AttributeError\n If the beam_energy or the collection semi-angle are not defined in\n metadata.\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n This method is based in Egerton's Matlab code [1]_ with a\n minor difference: the wrap-around problem when computing the FFTs is\n workarounded by padding the signal instead of subtracting the\n reflected tail.\n\n .. 
[1] Ray Egerton, \"Electron Energy-Loss Spectroscopy in the Electron\n Microscope\", Springer-Verlag, 2011.\n\n \"\"\"\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes.\"\n )\n output = {}\n if iterations == 1:\n # In this case s.data is not modified so there is no need to make\n # a deep copy.\n s = self.isig[0.0:]\n else:\n s = self.isig[0.0:].deepcopy()\n\n sorig = self.isig[0.0:]\n # Avoid singularity at 0\n if s.axes_manager.signal_axes[0].axis[0] == 0:\n s = s.isig[1:]\n sorig = self.isig[1:]\n\n # Constants and units\n me = constants.value(\"electron mass energy equivalent in MeV\") * 1e3 # keV\n\n # Mapped parameters\n self._are_microscope_parameters_missing(ignore_parameters=[\"convergence_angle\"])\n e0 = s.metadata.Acquisition_instrument.TEM.beam_energy\n beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle\n\n axis = s.axes_manager.signal_axes[0]\n eaxis = axis.axis.copy()\n\n if isinstance(zlp, hyperspy.signal.BaseSignal):\n if (\n zlp.axes_manager.navigation_dimension\n == self.axes_manager.navigation_dimension\n ):\n if zlp.axes_manager.signal_dimension == 0:\n i0 = zlp.data\n else:\n i0 = zlp.integrate1D(axis.index_in_axes_manager).data\n else:\n raise ValueError(\n \"The ZLP signal dimensions are not \"\n \"compatible with the dimensions of the \"\n \"low-loss signal\"\n )\n # The following prevents errors if the signal is a single spectrum\n if len(i0) != 1:\n i0 = i0.reshape(np.insert(i0.shape, axis.index_in_array, 1))\n elif isinstance(zlp, numbers.Number):\n i0 = zlp\n else:\n raise ValueError(\n \"The zero-loss peak input is not valid, it must be\\\n in the BaseSignal class or a Number.\"\n )\n\n if isinstance(t, hyperspy.signal.BaseSignal):\n if (\n t.axes_manager.navigation_dimension\n == self.axes_manager.navigation_dimension\n ) and (t.axes_manager.signal_dimension == 0):\n t = t.data\n t = t.reshape(np.insert(t.shape, axis.index_in_array, 1))\n else:\n raise ValueError(\n \"The thickness signal dimensions are not \"\n \"compatible with the dimensions of the \"\n \"low-loss signal\"\n )\n elif isinstance(t, np.ndarray) and t.shape and t.shape != (1,):\n raise ValueError(\n \"thickness must be a HyperSpy signal or a number,\" \" not a NumPy array.\"\n )\n\n # Slicer to get the signal data from 0 to axis.size\n slicer = s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, axis.size)),\n ]\n )\n\n # Kinetic definitions\n ke = e0 * (1 + e0 / 2.0 / me) / (1 + e0 / me) ** 2\n tgt = e0 * (2 * me + e0) / (me + e0)\n rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)\n\n for io in range(iterations):\n # Calculation of the ELF by normalization of the SSD\n # Norm(SSD) = Imag(-1/epsilon) (Energy Loss Function, ELF)\n\n # We start by the \"angular corrections\"\n Im = s.data / (np.log(1 + (beta * tgt / eaxis) ** 2)) / axis.scale\n if n is None and t is None:\n raise ValueError(\n \"The thickness and the refractive index are \"\n \"not defined. Please provide one of them.\"\n )\n elif n is not None and t is not None:\n raise ValueError(\n \"Please provide the refractive index OR the \"\n \"thickness information, not both\"\n )\n elif n is not None:\n # normalize using the refractive index.\n K = (Im / eaxis).sum(\n axis=axis.index_in_array, keepdims=True\n ) * axis.scale\n K = K / (np.pi / 2) / (1 - 1.0 / n**2)\n # K = (K / (np.pi / 2) / (1 - 1. 
/ n ** 2)).reshape(\n # np.insert(K.shape, axis.index_in_array, 1))\n # Calculate the thickness only if possible and required\n if zlp is not None and (full_output is True or iterations > 1):\n te = 332.5 * K * ke / i0\n if full_output is True:\n output[\"thickness\"] = te\n elif t is not None:\n if zlp is None:\n raise ValueError(\n \"The ZLP must be provided when the \"\n \"thickness is used for normalization.\"\n )\n # normalize using the thickness\n K = t * i0 / (332.5 * ke)\n te = t\n Im = Im / K\n\n # Kramers Kronig Transform:\n # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT\n # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490\n # Use an optimal FFT size to speed up the calculation, and\n # make it double the closest upper value to workaround the\n # wrap-around problem.\n esize = optimal_fft_size(2 * axis.size)\n q = -2 * np.fft.fft(Im, esize, axis.index_in_array).imag / esize\n\n q[slicer] *= -1\n q = np.fft.fft(q, axis=axis.index_in_array)\n # Final touch, we have Re(1/eps)\n Re = q[slicer].real + 1\n\n # Egerton does this to correct the wrap-around problem, but in our\n # case this is not necessary because we compute the fft on an\n # extended and padded spectrum to avoid this problem.\n # Re=real(q)\n # Tail correction\n # vm=Re[axis.size-1]\n # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /\n # (axis.size*2-arange(0,axis.size-1)))**2)\n # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /\n # (axis.size+arange(0,axis.size)))**2)\n\n # Epsilon appears:\n # We calculate the real and imaginary parts of the CDF\n e1 = Re / (Re**2 + Im**2)\n e2 = Im / (Re**2 + Im**2)\n\n if iterations > 1 and zlp is not None:\n # Surface losses correction:\n # Calculates the surface ELF from a vacuum border effect\n # A simulated surface plasmon is subtracted from the ELF\n Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2**2) - Im\n adep = tgt / (eaxis + delta) * np.arctan(\n beta * tgt / axis.axis\n ) - beta / 1000.0 / (beta**2 + axis.axis**2.0 / tgt**2)\n Srfint = 2000 * K * adep * Srfelf / rk0 / te * axis.scale\n s.data = sorig.data - Srfint\n _logger.debug(\"Iteration number: %d / %d\", io + 1, iterations)\n if iterations == io + 1 and full_output is True:\n sp = sorig._deepcopy_with_new_data(Srfint)\n sp.metadata.General.title += (\n \" estimated surface plasmon excitation.\"\n )\n output[\"surface plasmon estimation\"] = sp\n del sp\n del Srfint\n\n eps = s._deepcopy_with_new_data(e1 + e2 * 1j)\n del s\n eps.set_signal_type(\"DielectricFunction\")\n eps.metadata.General.title = (\n self.metadata.General.title + \"dielectric function \"\n \"(from Kramers-Kronig analysis)\"\n )\n if eps.tmp_parameters.has_item(\"filename\"):\n eps.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_CDF_after_Kramers_Kronig_transform\"\n )\n if \"thickness\" in output:\n # As above,prevent errors if the signal is a single spectrum\n if len(te) != 1:\n te = te[self.axes_manager._get_data_slice([(axis.index_in_array, 0)])]\n thickness = eps._get_navigation_signal(data=te)\n thickness.metadata.General.title = (\n self.metadata.General.title + \" thickness \"\n \"(calculated using Kramers-Kronig analysis)\"\n )\n output[\"thickness\"] = thickness\n if full_output is False:\n return eps\n else:\n return eps, output\n\n def create_model(\n self,\n low_loss=None,\n auto_background=True,\n auto_add_edges=True,\n GOS=\"gosh\",\n gos_file_path=None,\n dictionary=None,\n ):\n \"\"\"Create a model for the current EELS data.\n\n Parameters\n ----------\n %s\n\n Returns\n -------\n model : 
:class:`~.models.eelsmodel.EELSModel` instance.\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n \"\"\"\n from exspy.models.eelsmodel import EELSModel\n\n if low_loss is not None and not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"Multiple scattering is not implemented for spectra with a \"\n \"non-uniform energy axis. To create a model that does not \"\n \"account for multiple-scattering do not set the `ll` keyword.\"\n )\n model = EELSModel(\n self,\n low_loss=low_loss,\n auto_background=auto_background,\n auto_add_edges=auto_add_edges,\n GOS=GOS,\n dictionary=dictionary,\n )\n return model\n\n create_model.__doc__ %= EELSMODEL_PARAMETERS\n\n def plot(self, plot_edges=False, only_edges=(\"Major\", \"Minor\"), **kwargs):\n \"\"\"\n Plot the EELS spectrum. Markers indicating the position of the\n EELS edges can be added.\n\n Parameters\n ----------\n plot_edges : {False, True, list of string or string}\n If True, draws on s.metadata.Sample.elements for edges.\n Alternatively, provide a string of a single edge, or an iterable\n containing a list of valid elements, EELS families or edges. For\n example, an element should be 'Zr', an element edge family should\n be 'Zr_L' or an EELS edge 'Zr_L3'.\n only_edges : tuple of string\n Either 'Major' or 'Minor'. Defaults to both.\n kwargs\n The extra keyword arguments for plot()\n \"\"\"\n\n super().plot(**kwargs)\n\n if plot_edges:\n # edges is a mapping {edge_name:edge_energy}\n edges = self._get_edges_to_plot(plot_edges, only_edges)\n self._plot_edge_labels(edges)\n\n self._plot.signal_plot.events.closed.connect(self._on_signal_plot_closing, [])\n\n def _on_signal_plot_closing(self):\n self._edge_markers = {\"lines\": None, \"texts\": None, \"names\": []}\n\n def _get_offsets_and_segments(self, edges):\n index = np.array([float(v) for v in edges.values()]) # dictionaries\n segments = np.empty((len(index), 2, 2))\n offsets = np.empty((len(index), 2))\n for i, ind in enumerate(index):\n segments[i] = [[ind, 1], [ind, 1.1]]\n offsets[i] = [ind, 1.1]\n return offsets, segments\n\n def _initialise_markers(self):\n self._edge_markers[\"lines\"] = Lines(\n segments=np.empty((0, 2, 2)),\n transform=\"relative\",\n color=\"black\",\n shift=np.array([0.0, 0.19]),\n )\n self._edge_markers[\"texts\"] = Texts(\n offsets=np.empty((0, 2)),\n texts=np.empty((0,)),\n offset_transform=\"relative\",\n rotation=np.pi / 2,\n horizontalalignment=\"left\",\n verticalalignment=\"bottom\",\n facecolor=\"black\",\n shift=0.2,\n )\n for key in [\"lines\", \"texts\"]:\n self.add_marker(self._edge_markers[key], render_figure=False)\n\n def _plot_edge_labels(self, edges):\n \"\"\"\n Plot the EELS edge label (vertical line segment and text box) on\n the signal\n\n Parameters\n ----------\n edges : dictionary\n A dictionary with the labels as keys and their energies as values.\n For example, {'Fe_L2': 721.0, 'O_K': 532.0}\n\n \"\"\"\n # the object is needed to connect replot method when axes_manager\n # indices changed\n _ = EdgesRange(self, interactive=False)\n self._add_edge_labels(edges)\n\n def _get_edges_to_plot(self, plot_edges, only_edges):\n # get the dictionary of the edge to be shown\n extra_element_edge_family = []\n if plot_edges is True:\n try:\n elements = self.metadata.Sample.elements\n except AttributeError:\n raise ValueError(\n \"No elements defined. 
Add them with \"\n \"s.add_elements, or specify elements, edge \"\n \"families or edges directly\"\n )\n else:\n extra_element_edge_family.extend(np.atleast_1d(plot_edges))\n try:\n elements = self.metadata.Sample.elements\n except:\n elements = []\n\n element_edge_family = elements + extra_element_edge_family\n edges_dict = self._get_edges(element_edge_family, only_edges)\n\n return edges_dict\n\n def _get_edges(self, element_edge_family, only_edges):\n # get corresponding information depending on whether it is an element\n # a particular edge or a family of edge\n axis_min = self.axes_manager[-1].low_value\n axis_max = self.axes_manager[-1].high_value\n\n names_and_energies = {}\n shells = [\"K\", \"L\", \"M\", \"N\", \"O\"]\n\n errmsg = \"Edge family '{}' is not supported. Supported edge family \" \"is {}.\"\n for member in element_edge_family:\n try:\n element, ss = member.split(\"_\")\n\n if len(ss) == 1:\n memtype = \"family\"\n if ss not in shells:\n raise AttributeError(errmsg.format(ss, shells))\n if len(ss) == 2:\n memtype = \"edge\"\n if ss[0] not in shells:\n raise AttributeError(errmsg.format(ss[0], shells))\n except ValueError:\n element = member\n ss = \"\"\n memtype = \"element\"\n\n try:\n Binding_energies = elements_db[element][\"Atomic_properties\"][\n \"Binding_energies\"\n ]\n except KeyError as err:\n raise ValueError(\"'{}' is not a valid element\".format(element)) from err\n\n for edge in Binding_energies.keys():\n relevance = Binding_energies[edge][\"relevance\"]\n energy = Binding_energies[edge][\"onset_energy (eV)\"]\n\n isInRel = relevance in only_edges\n isInRng = axis_min < energy < axis_max\n isSameFamily = ss in edge\n\n if memtype == \"element\":\n flag = isInRel & isInRng\n edge_key = element + \"_\" + edge\n elif memtype == \"edge\":\n flag = isInRng & (edge == ss)\n edge_key = member\n elif memtype == \"family\":\n flag = isInRel & isInRng & isSameFamily\n edge_key = element + \"_\" + edge\n\n if flag:\n names_and_energies[edge_key] = energy\n\n return names_and_energies\n\n def _remove_edge_labels(self, edge_names=None, render_figure=True):\n \"\"\"\n Remove EELS edges markers to the signal\n\n Parameters\n ----------\n edge_names : str, list of str or None\n The string must be the name of edges, e. g. 'Fe_L2'.\n If ``None`` (default), remove all edges.\n render_figure : bool\n If True, render the figure after adding the markers\n \"\"\"\n if edge_names is None:\n edge_names = self._edge_markers[\"names\"]\n if isinstance(edge_names, set):\n # convert to list to find the index\n edge_names = list(edge_names)\n if not isinstance(edge_names, (list, tuple, np.ndarray)):\n edge_names = [edge_names]\n\n ind = np.where(np.isin(self._edge_markers[\"names\"], edge_names))\n\n if self._edge_markers[\"lines\"] is not None:\n self._edge_markers[\"lines\"].remove_items(ind)\n if self._edge_markers[\"texts\"] is not None:\n self._edge_markers[\"texts\"].remove_items(ind)\n if self._edge_markers[\"names\"] is not []:\n self._edge_markers[\"names\"] = np.delete(self._edge_markers[\"names\"], ind)\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_edge_labels(self, edges, render_figure=True):\n \"\"\"\n Add EELS edges markers to the signal\n\n Parameters\n ----------\n edge_name : dictionary or set\n If dictionary must be the name of edge as key and energy as values,\n e.g. {'Cr_L2': 584.0}. If list or set, must the name of the edge,\n e.g. 
set('Cr_L2', )\n render_figure : bool\n If True, render the figure after adding the markers\n \"\"\"\n if isinstance(edges, set):\n edges_dict = {}\n for edge in edges:\n element, ss = edge.split(\"_\")\n Binding_energies = elements_db[element][\"Atomic_properties\"][\n \"Binding_energies\"\n ]\n edges_dict[edge] = Binding_energies[ss][\"onset_energy (eV)\"]\n edges = edges_dict\n\n offsets, segments = self._get_offsets_and_segments(edges)\n names = list(edges.keys())\n\n self._edge_markers[\"lines\"].add_items(segments=segments)\n self._edge_markers[\"lines\"].update()\n self._edge_markers[\"texts\"].add_items(offsets=offsets, texts=names)\n self._edge_markers[\"lines\"].update()\n self._edge_markers[\"names\"] = np.append(self._edge_markers[\"names\"], names)\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _get_complementary_edges(self, edges, only_major=False):\n \"\"\"\n Get other edges of the same element present within the energy\n range of the axis\n\n Parameters\n ----------\n edges : iterable\n A sequence of strings contains edges in the format of\n element_subshell for EELS. For example, ['Fe_L2', 'O_K']\n only_major : bool\n Whether to show only the major edges. The default is False.\n\n Returns\n -------\n complmt_edges : list\n A list containing all the complementary edges of the same element\n present within the energy range of the axis\n \"\"\"\n\n emin = self.axes_manager[-1].low_value\n emax = self.axes_manager[-1].high_value\n complmt_edges = []\n\n elements = set()\n for edge in edges:\n element, _ = edge.split(\"_\")\n elements.update([element])\n\n for element in elements:\n ss_info = elements_db[element][\"Atomic_properties\"][\"Binding_energies\"]\n\n for subshell in ss_info:\n sse = ss_info[subshell][\"onset_energy (eV)\"]\n ssr = ss_info[subshell][\"relevance\"]\n\n if only_major:\n if ssr != \"Major\":\n continue\n\n edge = element + \"_\" + subshell\n if (\n (emin <= sse <= emax)\n and (subshell[-1] != \"a\")\n and (edge not in edges)\n ):\n complmt_edges.append(edge)\n\n return complmt_edges\n\n def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, out=None):\n factors = self._validate_rebin_args_and_get_factors(\n new_shape=new_shape, scale=scale\n )\n m = super().rebin(\n new_shape=new_shape, scale=scale, crop=crop, dtype=dtype, out=out\n )\n m = out or m\n time_factor = np.prod(\n [factors[axis.index_in_array] for axis in m.axes_manager.navigation_axes]\n )\n mdeels = m.metadata\n m.get_dimensions_from_data()\n if m.metadata.get_item(\"Acquisition_instrument.TEM.Detector.EELS\"):\n mdeels = m.metadata.Acquisition_instrument.TEM.Detector.EELS\n if \"dwell_time\" in mdeels:\n mdeels.dwell_time *= time_factor\n if \"exposure\" in mdeels:\n mdeels.exposure *= time_factor\n else:\n _logger.info(\n \"No dwell_time could be found in the metadata so \"\n \"this has not been updated.\"\n )\n if out is None:\n return m\n else:\n out.events.data_changed.trigger(obj=out)\n return m\n\n rebin.__doc__ = hyperspy.signal.BaseSignal.rebin.__doc__\n\n def vacuum_mask(\n self, threshold=10.0, start_energy=None, closing=True, opening=False\n ):\n \"\"\"\n Generate mask of the vacuum region\n\n Parameters\n ----------\n threshold: float\n For a given navigation coordinate, mean value in the energy axis\n below which the pixel is considered as vacuum.\n start_energy: float, None\n Minimum energy included in the calculation of the mean intensity.\n If None, consider only the last quarter of the spectrum to\n calculate the mask.\n closing: 
bool\n If True, a morphological closing is applied to the mask.\n opening: bool\n If True, a morphological opening is applied to the mask.\n\n Returns\n -------\n mask: signal\n The mask of the region.\n \"\"\"\n if self.axes_manager.navigation_dimension == 0:\n raise RuntimeError(\n \"Navigation dimenstion must be higher than 0 \"\n \"to estimate a vacuum mask.\"\n )\n signal_axis = self.axes_manager.signal_axes[0]\n if start_energy is None:\n start_energy = 0.75 * signal_axis.high_value\n\n mask = self.isig[start_energy:].mean(-1) <= threshold\n\n from scipy.ndimage import binary_dilation, binary_erosion\n\n if closing:\n mask.data = binary_dilation(mask.data, border_value=0)\n mask.data = binary_erosion(mask.data, border_value=1)\n if opening:\n mask.data = binary_erosion(mask.data, border_value=1)\n mask.data = binary_dilation(mask.data, border_value=0)\n return mask" } ]
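The `estimate_thickness` method documented in the context snippet above reduces to the log-ratio relation t/lambda = ln(I_total / I0). A minimal NumPy-only sketch of that relation follows; the angular corrections (which need beam energy, collection angle and density) are deliberately left out, and the function name and signature are illustrative rather than part of exspy:

import numpy as np

def log_ratio_thickness(total_intensity, zlp_intensity, mean_free_path_nm=None):
    # Relative thickness from the log-ratio method: t / lambda = ln(I_total / I0).
    t_over_lambda = np.log(np.asarray(total_intensity, dtype=float)
                           / np.asarray(zlp_intensity, dtype=float))
    if mean_free_path_nm is not None:
        # Absolute thickness in nm when an inelastic mean free path is supplied.
        return t_over_lambda * mean_free_path_nm
    return t_over_lambda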
import numpy as np
import pytest

import hyperspy.api as hs
from hyperspy.components1d import Lorentzian

from exspy.components import VolumePlasmonDrude
from exspy.misc.eels.tools import eels_constant
from exspy.signals import EELSSpectrum
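The FFT step at the heart of `kramers_kronig_analysis` in the same context snippet can be restated for a single, already-normalised spectrum. The sketch below mirrors the code shown there using NumPy only; `im_elf` stands for the normalised energy-loss function Im(-1/eps) on a uniform energy axis, and doubling the FFT length stands in for `optimal_fft_size` as a guard against the wrap-around problem:

import numpy as np

def kk_dielectric_from_elf(im_elf):
    # im_elf: normalised Im(-1/eps) sampled on a uniform energy axis.
    im_elf = np.asarray(im_elf, dtype=float)
    n = im_elf.size
    esize = 2 * n                       # padded FFT length (wrap-around guard)
    q = -2.0 * np.fft.fft(im_elf, esize).imag / esize
    q[:n] *= -1.0                       # sign flip on the first half, as in the snippet
    q = np.fft.fft(q)
    re = q[:n].real + 1.0               # Re(1/eps)
    e1 = re / (re**2 + im_elf**2)       # real part of the dielectric function
    e2 = im_elf / (re**2 + im_elf**2)   # imaginary part
    return e1 + 1j * e2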
20,729
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. class Test2D: def setup_method(self, method): """To test the kramers_kronig_analysis we will generate 3 EELSSpectrum instances. First a model energy loss function(ELF), in our case following the Drude bulk plasmon peak. Second, we simulate the inelastic scattering to generate a model scattering distribution (SPC). Finally, we use a lorentzian peak with integral equal to 1 to simulate a ZLP. """ # Parameters i0 = 1.0 t = hs.signals.BaseSignal(np.arange(10, 70, 10).reshape((2, 3))) t = t.transpose(signal_axes=0) scale = 0.02 # Create an 3x2x2048 spectrum with Drude plasmon
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. class Test2D: def setup_method(self, method): """To test the kramers_kronig_analysis we will generate 3 EELSSpectrum instances. First a model energy loss function(ELF), in our case following the Drude bulk plasmon peak. Second, we simulate the inelastic scattering to generate a model scattering distribution (SPC). Finally, we use a lorentzian peak with integral equal to 1 to simulate a ZLP. """ # Parameters i0 = 1.0 t = hs.signals.BaseSignal(np.arange(10, 70, 10).reshape((2, 3))) t = t.transpose(signal_axes=0) scale = 0.02 # Create an 3x2x2048 spectrum with Drude plasmon
s = EELSSpectrum(np.zeros((2, 3, 2 * 2048)))
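The `setup_method` in `cropped_code` builds a synthetic low-loss dataset (the gold `next_line` above just allocates its (2, 3, 2 * 2048) array) from a Drude bulk-plasmon energy-loss function plus a unit-area Lorentzian ZLP. For reference, a NumPy sketch of the standard Drude ELF such a test samples; the 15 eV plasmon energy and 1.5 eV width are illustrative values, and only the 0.02 eV channel width is taken from the snippet:

import numpy as np

def drude_elf(energy_eV, plasmon_energy_eV=15.0, plasmon_fwhm_eV=1.5):
    # Standard Drude bulk-plasmon energy-loss function:
    #   Im(-1/eps) = E * dE * Ep**2 / ((E**2 - Ep**2)**2 + (E * dE)**2)
    E = np.asarray(energy_eV, dtype=float)
    Ep, dE = plasmon_energy_eV, plasmon_fwhm_eV
    return E * dE * Ep**2 / ((E**2 - Ep**2) ** 2 + (E * dE) ** 2)

# Energy axis matching the test signal: 2 * 2048 channels, 0.02 eV per channel.
energy = np.arange(2 * 2048) * 0.02
elf = drude_elf(energy)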
2
2023-10-28 20:04:10+00:00
24k
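One further method from the EELSSpectrum context of this record, `richardson_lucy_deconvolution`, reduces to a compact per-spectrum loop. The sketch below restates the `deconv_function` shown in that snippet with descriptive comments added; it assumes `signal` and `psf` are 1-D arrays of equal length, as that method requires:

import numpy as np

def richardson_lucy_1d(signal, psf, iterations=15):
    # Multiplicative Richardson-Lucy updates for Poissonian noise,
    # following deconv_function in the context snippet above.
    signal = np.asarray(signal, dtype=float)
    psf = np.asarray(psf, dtype=float)
    result = signal.copy()
    psf_size = psf.size
    imax = psf.argmax()                 # position of the PSF maximum
    mimax = psf_size - 1 - imax
    for _ in range(iterations):
        # Blur the current estimate with the PSF and crop to the signal length.
        first = np.convolve(psf, result)[imax:imax + psf_size]
        # Correct by the ratio data / blurred estimate, correlated with the flipped PSF.
        result *= np.convolve(psf[::-1], signal / first)[mimax:mimax + psf_size]
    return result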
Elfenreigen/UniChest
train.py
[ { "identifier": "utils", "path": "factory/utils.py", "snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass AttrDict(dict):\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n def avg(self):\n def global_avg(self):\n def max(self):\n def value(self):\n def __str__(self):\n def __init__(self, delimiter=\"\\t\"):\n def update(self, **kwargs):\n def __getattr__(self, attr):\n def __str__(self):\n def global_avg(self):\n def synchronize_between_processes(self):\n def add_meter(self, name, meter):\n def log_every(self, iterable, print_freq, header=None):\n def __init__(self, *args, **kwargs):\ndef compute_acc(logits, label, reduction='mean'):\ndef compute_n_params(model, return_str=True):\ndef setup_for_distributed(is_master):\n def print(*args, **kwargs):\ndef seed_worker(worker_id):\ndef is_dist_avail_and_initialized():\ndef get_world_size():\ndef get_rank():\ndef is_main_process():\ndef save_on_master(*args, **kwargs):\ndef init_distributed_mode(args):\n MB = 1024.0 * 1024.0" }, { "identifier": "create_scheduler", "path": "scheduler/scheduler_factory.py", "snippet": "def create_scheduler(args, optimizer):\n num_epochs = args.epochs\n\n if getattr(args, 'lr_noise', None) is not None:\n lr_noise = getattr(args, 'lr_noise')\n if isinstance(lr_noise, (list, tuple)):\n noise_range = [n * num_epochs for n in lr_noise]\n if len(noise_range) == 1:\n noise_range = noise_range[0]\n else:\n noise_range = lr_noise * num_epochs\n else:\n noise_range = None\n\n lr_scheduler = None\n if args.sched == 'cosine':\n lr_scheduler = CosineLRScheduler(\n optimizer,\n t_initial=num_epochs,\n t_mul=getattr(args, 'lr_cycle_mul', 1.),\n lr_min=args.min_lr,\n decay_rate=args.decay_rate,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cycle_limit=getattr(args, 'lr_cycle_limit', 1),\n t_in_epochs=True,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs\n elif args.sched == 'tanh':\n lr_scheduler = TanhLRScheduler(\n optimizer,\n t_initial=num_epochs,\n t_mul=getattr(args, 'lr_cycle_mul', 1.),\n lr_min=args.min_lr,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cycle_limit=getattr(args, 'lr_cycle_limit', 1),\n t_in_epochs=True,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs\n elif args.sched == 'step':\n lr_scheduler = StepLRScheduler(\n optimizer,\n decay_t=args.decay_epochs,\n decay_rate=args.decay_rate,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n elif args.sched == 'plateau':\n mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max'\n lr_scheduler = PlateauLRScheduler(\n optimizer,\n decay_rate=args.decay_rate,\n patience_t=args.patience_epochs,\n lr_min=args.min_lr,\n mode=mode,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cooldown_t=0,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n 
noise_seed=getattr(args, 'seed', 42),\n )\n\n return lr_scheduler, num_epochs" }, { "identifier": "create_optimizer", "path": "optim/optim_factory.py", "snippet": "def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True):\n opt_lower = args.opt.lower()\n weight_decay = args.weight_decay\n if weight_decay and filter_bias_and_bn:\n skip = {}\n if hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip)\n weight_decay = 0.\n else:\n parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())]\n #model.parameters()\n\n # print(parameters)\n if 'fused' in opt_lower:\n assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'\n\n opt_args = dict(lr=args.lr, weight_decay=weight_decay)\n if hasattr(args, 'opt_eps') and args.opt_eps is not None:\n opt_args['eps'] = args.opt_eps\n if hasattr(args, 'opt_betas') and args.opt_betas is not None:\n opt_args['betas'] = args.opt_betas\n if hasattr(args, 'opt_args') and args.opt_args is not None:\n opt_args.update(args.opt_args)\n\n opt_split = opt_lower.split('_')\n opt_lower = opt_split[-1]\n if opt_lower == 'sgd' or opt_lower == 'nesterov':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'momentum':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'adam':\n optimizer = optim.Adam(parameters, **opt_args)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, **opt_args)\n elif opt_lower == 'nadam':\n optimizer = Nadam(parameters, **opt_args)\n elif opt_lower == 'radam':\n optimizer = RAdam(parameters, **opt_args)\n elif opt_lower == 'adamp': \n optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)\n elif opt_lower == 'sgdp': \n optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'adadelta':\n optimizer = optim.Adadelta(parameters, **opt_args)\n elif opt_lower == 'adafactor':\n if not args.lr:\n opt_args['lr'] = None\n optimizer = Adafactor(parameters, **opt_args)\n elif opt_lower == 'adahessian':\n optimizer = Adahessian(parameters, **opt_args)\n elif opt_lower == 'rmsprop':\n optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'rmsproptf':\n optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'novograd':\n optimizer = NovoGrad(parameters, **opt_args)\n elif opt_lower == 'nvnovograd':\n optimizer = NvNovoGrad(parameters, **opt_args)\n elif opt_lower == 'fusedsgd':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'fusedmomentum':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'fusedadam':\n optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)\n elif opt_lower == 'fusedadamw':\n optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)\n elif opt_lower == 'fusedlamb':\n optimizer = FusedLAMB(parameters, **opt_args)\n elif opt_lower == 'fusednovograd':\n opt_args.setdefault('betas', (0.95, 0.98))\n optimizer = FusedNovoGrad(parameters, 
**opt_args)\n else:\n assert False and \"Invalid optimizer\"\n raise ValueError\n\n if len(opt_split) > 1:\n if opt_split[0] == 'lookahead':\n optimizer = Lookahead(optimizer)\n\n return optimizer" }, { "identifier": "train", "path": "engine/train.py", "snippet": "def train(model, image_encoder, text_encoder, tokenizer, data_loader, optimizer, epoch, warmup_steps, device, scheduler, args, config, writer):\n clip_loss = ClipLoss()\n ce_loss = nn.CrossEntropyLoss(ignore_index=-1)\n \n if args.add_dataset:\n ASL_loss = AsymmetricLossAdd(gamma_neg=6, gamma_pos=0, clip=0.05, disable_torch_grad_focal_loss=True)\n else:\n ASL_loss = AsymmetricLoss(gamma_neg=6, gamma_pos=0, clip=0.05, disable_torch_grad_focal_loss=True)\n\n loss_m = AverageMeter()\n loss_clip_m = AverageMeter()\n loss_ce_m = AverageMeter()\n loss_ce_image_m = AverageMeter()\n loss_ce_text_m = AverageMeter()\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n end = time.time()\n\n model.train() \n image_encoder.train() \n text_encoder.train()\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_ce', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_ce_image', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n if args.use_entity_features:\n metric_logger.add_meter('loss_ce_text', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_clip', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.update(loss=1.0)\n metric_logger.update(lr = scheduler._get_lr(epoch)[0])\n\n header = 'Train Epoch: [{}]'.format(epoch)\n print_freq = 50 \n step_size = 100\n warmup_iterations = warmup_steps*step_size \n scalar_step = epoch*len(data_loader)\n num_batches_per_epoch = data_loader.num_batches\n sample_digits = math.ceil(math.log(data_loader.num_samples + 1, 10))\n\n for i, sample in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n if args.fourier:\n image = fourier_aug(sample['image'].to(device))\n else:\n image = sample['image'].to(device) \n label = sample['label'].long().to(device)\n\n if args.ignore_index:\n pass\n else:\n label[label==-1]=0\n entity = sample['entity']\n\n if args.add_dataset:\n dataset_label = sample['label_dataset']\n\n data_time_m.update(time.time() - end)\n\n optimizer.zero_grad()\n\n if args.add_dataset:\n text_list = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration',\n 'fibrosis',\t'No Finding', 'Pleural Other', 'Support Devices', 'Aortic enlargement',\n 'Clavicle fracture', 'Enlarged PA', 'ILD', 'Lung cavity', 'Lung cyst', 'Mediastinal shift',\t\n 'Nodule/Mass', 'Pulmonary fibrosis', 'Rib fracture', 'Other lesion', 'COPD', 'Lung tumor', 'Tuberculosis',\n 'Other diseases']\n\n else:\n\n text_list = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged 
cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration']\n \n \n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n entity_features = get_text_features(text_encoder,entity,tokenizer,device,max_length=args.max_length)\n\n image_features,image_features_pool = image_encoder(image)\n if args.add_dataset:\n pred_class_image, moe_img = model(image_features,text_features,args)\n else:\n pred_class_image = model(image_features,text_features)\n\n\n if args.bce or args.asl:\n label = label.float()\n\n label_mask = (label != -1).squeeze()\n\n\n\n if args.add_dataset:\n loss_moe_img = moe_cl_loss(moe_img, dataset_label)\n\n if args.asl:\n pred_class_image = pred_class_image[label_mask]\n label_image = label[label_mask] \n loss_ce_image = ASL_loss(pred_class_image.view(-1,1),label_image.view(-1,1))\n elif args.bce:\n pred_class_image = pred_class_image[label_mask]\n label_image = label[label_mask] \n loss_ce_image = F.binary_cross_entropy(pred_class_image.view(-1,1),label_image.view(-1,1))\n else:\n if args.asl:\n loss_ce_image = ASL_loss(pred_class_image.view(-1,1),label.view(-1,1))\n elif args.bce:\n loss_ce_image = F.binary_cross_entropy_with_logits(pred_class_image.view(-1,1),label.view(-1,1)) \n else:\n loss_ce_image = ce_loss(pred_class_image.view(-1,2),label.view(-1)) \n\n if args.use_entity_features:\n if args.add_dataset:\n pred_class_text, moe_txt = model(entity_features.unsqueeze(1),text_features,args)\n loss_moe_txt = moe_cl_loss(moe_txt, dataset_label)\n else:\n pred_class_text = model(entity_features.unsqueeze(1),text_features)\n\n if args.add_dataset:\n if args.asl:\n pred_class_text = pred_class_text[label_mask]\n label_text = label[label_mask] \n loss_ce_text = ASL_loss(pred_class_text.view(-1,1),label_text.view(-1,1))\n \n elif args.bce:\n pred_class_text = pred_class_text[label_mask]\n label_text = label[label_mask] \n loss_ce_text = F.binary_cross_entropy(pred_class_text.view(-1,1),label_text.view(-1,1))\n\n else:\n if args.asl:\n loss_ce_text = ASL_loss(pred_class_text.view(-1,1),label.view(-1,1))\n elif args.bce:\n loss_ce_text = F.binary_cross_entropy_with_logits(pred_class_text.view(-1,1),label.view(-1,1)) \n else:\n loss_ce_text = ce_loss(pred_class_text.view(-1,2),label.view(-1))\n\n loss_ce = loss_ce_image + loss_ce_text\n if args.add_dataset:\n loss_moe = loss_moe_img + loss_moe_txt\n\n else:\n loss_ce = loss_ce_image\n if args.add_dataset:\n loss_moe = loss_moe_img\n\n\n loss_clip = clip_loss(image_features_pool,entity_features)\n if args.add_dataset:\n loss = loss_ce + loss_clip * args.loss_ratio + args.moe_ratio * loss_moe\n else:\n loss = loss_ce + loss_clip * args.loss_ratio\n \n\n loss.backward()\n optimizer.step() \n \n writer.add_scalar('loss/loss', loss, scalar_step)\n writer.add_scalar('loss/loss_ce', loss_ce, scalar_step)\n writer.add_scalar('loss/loss_ce_image', loss_ce_image, scalar_step)\n if args.use_entity_features:\n writer.add_scalar('loss/loss_ce_text', loss_ce_text, scalar_step)\n writer.add_scalar('loss/loss_clip', loss_clip, scalar_step)\n scalar_step += 1\n\n metric_logger.update(loss=loss.item())\n metric_logger.update(loss_ce=loss_ce.item())\n 
metric_logger.update(loss_ce_image=loss_ce_image.item())\n if args.use_entity_features:\n metric_logger.update(loss_ce_text=loss_ce_text.item())\n metric_logger.update(loss_clip=loss_clip.item())\n\n\n if epoch==0 and i%step_size==0 and i<=warmup_iterations: \n scheduler.step(i//step_size) \n metric_logger.update(lr = scheduler._get_lr(epoch)[0])\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n batch_count = i + 1\n if i % 100 == 0:\n batch_size = len(image)\n num_samples = batch_count * batch_size\n samples_per_epoch = data_loader.num_samples\n percent_complete = 100.0 * batch_count / num_batches_per_epoch\n\n # NOTE loss is coarsely sampled, just master node and per log update\n loss_m.update(loss.item(), batch_size)\n loss_clip_m.update(loss_clip.item(), batch_size)\n loss_ce_m.update(loss_ce.item(), batch_size)\n loss_ce_image_m.update(loss_ce_image.item(), batch_size)\n if args.use_entity_features:\n loss_ce_text_m.update(loss_ce_text.item(), batch_size)\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) \"\n f\"Loss_clip: {loss_clip_m.val:#.5g} ({loss_clip_m.avg:#.4g}) \"\n f\"Loss_ce: {loss_ce_m.val:#.5g} ({loss_ce_m.avg:#.4g}) \"\n f\"Loss_ce_image: {loss_ce_image_m.val:#.5g} ({loss_ce_image_m.avg:#.4g}) \"\n f\"Loss_ce_text: {loss_ce_text_m.val:#.5g} ({loss_ce_text_m.avg:#.4g}) \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n f\"Batch (t): {batch_time_m.avg:.3f}, {batch_size/ batch_time_m.val:#g}/s \"\n f\"LR: { scheduler._get_lr(epoch)[0]:5f} \"\n )\n else:\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) \"\n f\"Loss_clip: {loss_clip_m.val:#.5g} ({loss_clip_m.avg:#.4g}) \"\n f\"Loss_ce: {loss_ce_m.val:#.5g} ({loss_ce_m.avg:#.4g}) \"\n f\"Loss_ce_image: {loss_ce_image_m.val:#.5g} ({loss_ce_image_m.avg:#.4g}) \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n f\"Batch (t): {batch_time_m.avg:.3f}, {batch_size/ batch_time_m.val:#g}/s \"\n f\"LR: { scheduler._get_lr(epoch)[0]:5f} \"\n )\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger.global_avg()) \n return {k: \"{:.6f}\".format(meter.global_avg) for k, meter in metric_logger.meters.items()} #,loss_epoch.mean()" }, { "identifier": "valid_on_cheXpert", "path": "engine/train.py", "snippet": "def valid_on_cheXpert(model,image_encoder,text_encoder,tokenizer,data_loader, epoch, device, args, config, writer):\n criterion = nn.CrossEntropyLoss()\n model.eval()\n image_encoder.eval()\n text_encoder.eval()\n text_list = ['atelectasis', 'cardiomegaly', 'consolidation', 'edema', 'pleural effusion']\n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n \n val_scalar_step = epoch*len(data_loader)\n val_losses = []\n\n # initialize the ground truth and output tensor\n gt = torch.FloatTensor()\n gt = gt.cuda()\n pred = torch.FloatTensor()\n pred = pred.cuda()\n\n for i, sample in enumerate(data_loader):\n image = sample['image'].to(device,non_blocking=True) \n label = sample['label'].long().to(device)\n if args.bce or args.asl:\n label = label.float()\n\n gt = torch.cat((gt, label), 0)\n with torch.no_grad():\n image_features,image_features_pool = image_encoder(image)\n \n # \n if args.add_dataset:\n pred_class,_ = model(image_features,text_features,args)#b,14,2/1\n 
val_loss = F.binary_cross_entropy(pred_class.view(-1,1),label.view(-1, 1))\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n pred_class = model(image_features,text_features)#b,14,2/1\n if args.bce or args.asl:\n val_loss = F.binary_cross_entropy_with_logits(pred_class.view(-1,1),label.view(-1, 1))\n pred_class = torch.sigmoid(pred_class)\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n val_loss = criterion(pred_class.view(-1,2),label.view(-1))\n pred_class = torch.softmax(pred_class, dim=-1)\n pred = torch.cat((pred, pred_class[:,:,1]), 0)\n \n val_losses.append(val_loss.item())\n writer.add_scalar('val_loss/loss', val_loss, val_scalar_step)\n val_scalar_step += 1\n metrics = compute_AUCs(gt, pred, n_class=5)\n AUROC_avg = metrics['mean_auc']\n avg_val_loss = np.array(val_losses).mean()\n return avg_val_loss,AUROC_avg,metrics" }, { "identifier": "valid_on_chestxray14", "path": "engine/train.py", "snippet": "def valid_on_chestxray14(model, image_encoder, text_encoder, tokenizer, data_loader, epoch, device, args, config, writer):\n criterion = nn.CrossEntropyLoss()\n model.eval()\n image_encoder.eval()\n text_encoder.eval()\n text_list = [\"atelectasis\",\"cardiomegaly\",\"pleural effusion\",\"infiltration\",\"lung mass\",\"lung nodule\",\"pneumonia\",\"pneumothorax\",\"consolidation\",\"edema\",\"emphysema\",\"fibrosis\",\"pleural thicken\",\"hernia\"]\n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n \n val_scalar_step = epoch*len(data_loader)\n val_losses = []\n\n gt = torch.FloatTensor()\n gt = gt.cuda()\n pred = torch.FloatTensor()\n pred = pred.cuda()\n\n for i, sample in enumerate(data_loader):\n image = sample['image'].to(device,non_blocking=True) \n label = sample['label'].long().to(device)\n if args.bce or args.asl:\n label = label.float()\n\n gt = torch.cat((gt, label), 0)\n with torch.no_grad():\n image_features,image_features_pool = image_encoder(image)\n\n if args.add_dataset:\n pred_class,_ = model(image_features,text_features,args)#b,14,2/1\n val_loss = F.binary_cross_entropy(pred_class.view(-1,1),label.view(-1, 1))\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n pred_class = model(image_features,text_features)#b,14,2/1\n if args.bce or args.asl:\n val_loss = F.binary_cross_entropy_with_logits(pred_class.view(-1,1),label.view(-1, 1))\n pred_class = torch.sigmoid(pred_class)\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n val_loss = criterion(pred_class.view(-1,2),label.view(-1))\n pred_class = torch.softmax(pred_class, dim=-1)\n pred = torch.cat((pred, pred_class[:,:,1]), 0)\n\n\n\n val_losses.append(val_loss.item())\n writer.add_scalar('val_loss/loss', val_loss, val_scalar_step)\n val_scalar_step += 1\n metrics = compute_AUCs(gt, pred, n_class = 14)\n AUROC_avg = metrics['mean_auc']\n avg_val_loss = np.array(val_losses).mean()\n return avg_val_loss,AUROC_avg,metrics" }, { "identifier": "CLP_clinical", "path": "models/clip_tqn.py", "snippet": "class CLP_clinical(nn.Module):\n def __init__(self,\n bert_model_name: str,\n embed_dim: int = 768,\n freeze_layers:Union[Tuple[int, int], int] = None):\n super().__init__()\n self.bert_model = self._get_bert_basemodel(bert_model_name=bert_model_name, freeze_layers=freeze_layers)\n self.mlp_embed = nn.Sequential(\n nn.Linear(embed_dim, embed_dim),\n nn.GELU(),\n nn.Linear(embed_dim, embed_dim)\n )\n self.embed_dim = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.init_parameters()\n \n def 
init_parameters(self):\n nn.init.constant_(self.logit_scale, np.log(1 / 0.07))\n for m in self.mlp_embed:\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, std=self.embed_dim ** -0.5)\n\n def _get_bert_basemodel(self, bert_model_name, freeze_layers=None):#12\n try:\n print(bert_model_name)\n config = BertConfig.from_pretrained(bert_model_name, output_hidden_states=True)#bert-base-uncased\n model = AutoModel.from_pretrained(bert_model_name, config=config)#, return_dict=True)\n print(\"Text feature extractor:\", bert_model_name)\n print(\"bert encoder layers:\",len(model.encoder.layer))\n except:\n raise (\"Invalid model name. Check the config file and pass a BERT model from transformers lybrary\")\n\n if freeze_layers is not None:\n for layer_idx in freeze_layers:\n for param in list(model.encoder.layer[layer_idx].parameters()):\n param.requires_grad = False\n return model\n\n def encode_text(self, text):\n #input batch_size,token, return batch_size,dim \n output = self.bert_model(input_ids = text['input_ids'],attention_mask = text['attention_mask'] )\n last_hidden_state, pooler_output, hidden_states = output[0],output[1],output[2]\n encode_out = self.mlp_embed(pooler_output)\n # encode_out = pooler_output\n return encode_out\n \n def forward(self,text1,text2):\n text1_features = self.encode_text(text1)\n text2_features = self.encode_text(text2)\n text1_features = F.normalize(text1_features, dim=-1)\n text2_features = F.normalize(text2_features, dim=-1)\n return text1_features, text2_features, self.logit_scale.exp()" }, { "identifier": "ModelRes", "path": "models/clip_tqn.py", "snippet": "class ModelRes(nn.Module):\n def __init__(self, res_base_model):\n super(ModelRes, self).__init__()\n self.resnet_dict = {\"resnet50\": models.resnet50(pretrained=True)}\n self.resnet = self._get_res_basemodel(res_base_model)\n\n num_ftrs = int(self.resnet.fc.in_features)\n self.res_features = nn.Sequential(*list(self.resnet.children())[:-2])\n\n self.res_l1 = nn.Linear(num_ftrs, num_ftrs)\n self.res_l2 = nn.Linear(num_ftrs, 768)\n\n def _get_res_basemodel(self, res_model_name):\n try:\n res_model = self.resnet_dict[res_model_name]\n print(\"Image feature extractor:\", res_model_name)\n return res_model\n except:\n raise (\"Invalid model name. 
Check the config file and pass one of: resnet18 or resnet50\")\n\n def forward(self, img):\n batch_size = img.shape[0]\n res_fea = self.res_features(img)\n\n res_fea = rearrange(res_fea,'b d n1 n2 -> b (n1 n2) d')\n h = rearrange(res_fea,'b n d -> (b n) d')\n x = self.res_l1(h)\n x = F.relu(x)\n x = self.res_l2(x)\n out_emb = rearrange(x,'(b n) d -> b n d',b=batch_size)\n out_pool = torch.mean(out_emb,dim=1)\n return out_emb,out_pool" }, { "identifier": "TQN_Model", "path": "models/clip_tqn.py", "snippet": "class TQN_Model(nn.Module):\n def __init__(self, \n embed_dim: int = 768, \n class_num: int = 1, \n lam: list = [1, 0]\n ):\n super().__init__()\n self.d_model = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n decoder_layer = TransformerDecoderLayer(self.d_model, 4, 1024,\n 0.1, 'relu',normalize_before=True)\n decoder_layerV1 = TransformerDecoderLayerV1(self.d_model, 4, 1024,\n 0.1, 'relu', True, lam)\n self.decoder_norm = nn.LayerNorm(self.d_model)\n self.decoder = TransformerDecoder(decoder_layer, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n \n self.dropout_feas = nn.Dropout(0.1)\n\n self.mlp_head = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.apply(self._init_weights)\n \n @staticmethod\n def _init_weights(module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n module.out_proj.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n \n def forward(self, image_features, text_features):\n\n batch_size = image_features.shape[0]\n image_features = image_features.transpose(0,1)\n text_features = text_features.unsqueeze(1).repeat(1, batch_size, 1)\n image_features = self.decoder_norm(image_features)\n text_features = self.decoder_norm(text_features)\n \n image_features_pool = torch.mean(image_features,dim=0).unsqueeze(0)\n features = self.decoderV1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None) \n \n features = self.dropout_feas(features).transpose(0,1) #b,embed_dim\n out = self.mlp_head(features) #(batch_size, query_num)\n return out" }, { "identifier": "TQN_Model_Add", "path": "models/clip_tqn.py", "snippet": "class TQN_Model_Add(nn.Module):\n def __init__(self, \n embed_dim: int = 768, \n class_num: int = 1, \n gate_num: int = 3,\n high_dim: int = 32,\n lam: list = [1, 0]\n ):\n super().__init__()\n self.d_model = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n decoder_layer = TransformerDecoderLayer(self.d_model, 4, 1024,\n 0.1, 'relu',normalize_before=True)\n decoder_layerV1 = TransformerDecoderLayerV1(self.d_model, 4, 1024,\n 0.1, 'relu', True, lam)\n self.decoder_norm = nn.LayerNorm(self.d_model)\n self.decoder = TransformerDecoder(decoder_layer, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n \n self.decoderV1_1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1_2 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n 
return_intermediate=False)\n self.decoderV1_3 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n\n self.dropout_feas = nn.Dropout(0.1)\n\n self.mlp_head = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_1 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_2 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_3 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n ) \n \n self.gate_head = nn.Sequential(\n nn.Linear(embed_dim, gate_num)\n )\n self.cl_head = nn.Sequential(\n nn.Linear(gate_num, high_dim)\n )\n\n self.apply(self._init_weights)\n \n @staticmethod\n def _init_weights(module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n module.out_proj.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n \n def forward(self, image_features, text_features, args):\n\n batch_size = image_features.shape[0]\n image_features = image_features.transpose(0,1)\n text_features = text_features.unsqueeze(1).repeat(1, batch_size, 1)\n image_features = self.decoder_norm(image_features)\n text_features = self.decoder_norm(text_features)\n \n image_features_pool = torch.mean(image_features,dim=0).unsqueeze(0)\n features = self.decoderV1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n gate_weight = self.gate_head(image_features_pool.squeeze(0)) \n \n features = self.dropout_feas(features).transpose(0,1) #b,embed_dim\n \n \n if args.finetune:\n features_1 = self.decoderV1_1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_1 = self.dropout_feas(features_1).transpose(0,1) \n features_2 = self.decoderV1_2(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_2 = self.dropout_feas(features_2).transpose(0,1) \n features_3 = self.decoderV1_3(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_3 = self.dropout_feas(features_3).transpose(0,1) \n \n out_1 = torch.sigmoid(self.mlp_head_1(features_1))\n out_2 = torch.sigmoid(self.mlp_head_2(features_2))\n out_3 = torch.sigmoid(self.mlp_head_3(features_3))\n\n\n out = self.mlp_head(features)\n \n gate_weight = torch.softmax(gate_weight, dim=1)\n out = torch.sigmoid(out)\n\n high_dimension = self.cl_head(gate_weight)\n out_bias = gate_weight[:,0].unsqueeze(1).unsqueeze(2) * out_1 + gate_weight[:,1].unsqueeze(1).unsqueeze(2) * out_2 + gate_weight[:,2].unsqueeze(1).unsqueeze(2) * out_3\n\n out = args.main_ratio * out + args.bias_ratio * out_bias\n\n return out, high_dimension" }, { "identifier": "ModelDense", "path": "models/clip_tqn.py", "snippet": "class ModelDense(nn.Module):\n def __init__(self, dense_base_model):\n super(ModelDense, self).__init__()\n \n self.densenet_dict = {\"densenet121\": models.densenet121(pretrained=True)}#,\n # \"densenet161\": models.densenet161(pretrained=True)}\n self.densenet = self._get_dense_basemodel(dense_base_model)\n num_ftrs = int(self.densenet.classifier.in_features)\n 
self.dense_features = self.densenet.features\n self.dense_l1 = nn.Linear(num_ftrs, num_ftrs)\n self.dense_l2 = nn.Linear(num_ftrs, 768)\n\n def _get_dense_basemodel(self, dense_base_model):\n try:\n dense_model = self.densenet_dict[dense_base_model]\n print(\"Image feature extractor:\", dense_base_model)\n return dense_model\n except:\n raise (\"Invalid model name. Check the config file and pass one of: densenet121 or densenet161\")\n\n def forward(self, img):\n batch_size = img.shape[0]\n dense_fea = self.dense_features(img)#N, 1024, 7,7\n dense_fea = rearrange(dense_fea,'b d n1 n2 -> b (n1 n2) d')\n h = rearrange(dense_fea,'b n d -> (b n) d')\n x = self.dense_l1(h)\n x = F.relu(x)\n x = self.dense_l2(x)\n out_emb = rearrange(x,'(b n) d -> b n d',b=batch_size)\n out_pool = torch.mean(out_emb,dim=1)\n return out_emb,out_pool" }, { "identifier": "CLP_clinical2", "path": "models/clip_tqn.py", "snippet": "class CLP_clinical2(nn.Module):\n def __init__(self,\n bert_model_name: str,\n embed_dim: int = 768,\n freeze_layers:Union[Tuple[int, int], int] = None):\n super().__init__()\n self.bert_model = self._get_bert_basemodel(bert_model_name=bert_model_name, freeze_layers=freeze_layers)\n\n\n def _get_bert_basemodel(self, bert_model_name, freeze_layers=None):#12\n try:\n print(bert_model_name)\n model = AutoModel.from_pretrained(bert_model_name)\n print(\"Text feature extractor:\", bert_model_name)\n print(\"bert encoder layers:\",len(model.encoder.layer))\n except:\n raise (\"Invalid model name. Check the config file and pass a BERT model from transformers lybrary\")\n\n if freeze_layers is not None:\n for layer_idx in freeze_layers:\n for param in list(model.encoder.layer[layer_idx].parameters()):\n param.requires_grad = False\n return model\n\n def encode_text(self, text):\n output = self.bert_model(input_ids = text['input_ids'],attention_mask = text['attention_mask'] )\n encode_out = output.last_hidden_state[:,0,:]\n return encode_out\n \n def forward(self,text1,text2):\n text1_features = self.encode_text(text1)\n text2_features = self.encode_text(text2)\n text1_features = F.normalize(text1_features, dim=-1)\n text2_features = F.normalize(text2_features, dim=-1)\n return text1_features, text2_features, self.logit_scale.exp()" }, { "identifier": "BertTokenizer", "path": "models/tokenization_bert.py", "snippet": "class BertTokenizer(PreTrainedTokenizer):\n r\"\"\"\n Construct a BERT tokenizer. Based on WordPiece.\n This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.\n Users should refer to this superclass for more information regarding those methods.\n Args:\n vocab_file (:obj:`str`):\n File containing the vocabulary.\n do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to lowercase the input when tokenizing.\n do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to do basic tokenization before WordPiece.\n never_split (:obj:`Iterable`, `optional`):\n Collection of tokens which will never be split during tokenization. Only has an effect when\n :obj:`do_basic_tokenize=True`\n unk_token (:obj:`str`, `optional`, defaults to :obj:`\"[UNK]\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n sep_token (:obj:`str`, `optional`, defaults to :obj:`\"[SEP]\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n pad_token (:obj:`str`, `optional`, defaults to :obj:`\"[PAD]\"`):\n The token used for padding, for example when batching sequences of different lengths.\n cls_token (:obj:`str`, `optional`, defaults to :obj:`\"[CLS]\"`):\n The classifier token which is used when doing sequence classification (classification of the whole sequence\n instead of per-token classification). It is the first token of the sequence when built with special tokens.\n mask_token (:obj:`str`, `optional`, defaults to :obj:`\"[MASK]\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to tokenize Chinese characters.\n This should likely be deactivated for Japanese (see this `issue\n <https://github.com/huggingface/transformers/issues/328>`__).\n strip_accents: (:obj:`bool`, `optional`):\n Whether or not to strip all accents. If this option is not specified, then it will be determined by the\n value for :obj:`lowercase` (as in the original BERT).\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n def __init__(\n self,\n vocab_file,\n do_lower_case=True,\n do_basic_tokenize=True,\n never_split=None,\n unk_token=\"[UNK]\",\n sep_token=\"[SEP]\",\n pad_token=\"[PAD]\",\n cls_token=\"[CLS]\",\n mask_token=\"[MASK]\",\n tokenize_chinese_chars=True,\n strip_accents=None,\n **kwargs\n ):\n super().__init__(\n do_lower_case=do_lower_case,\n do_basic_tokenize=do_basic_tokenize,\n never_split=never_split,\n unk_token=unk_token,\n sep_token=sep_token,\n pad_token=pad_token,\n cls_token=cls_token,\n mask_token=mask_token,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n **kwargs,\n )\n\n if not os.path.isfile(vocab_file):\n raise ValueError(\n \"Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained \"\n \"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(vocab_file)\n )\n self.vocab = load_vocab(vocab_file)\n self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])\n self.do_basic_tokenize = do_basic_tokenize\n if do_basic_tokenize:\n self.basic_tokenizer = BasicTokenizer(\n do_lower_case=do_lower_case,\n never_split=never_split,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n )\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)\n\n @property\n def do_lower_case(self):\n return self.basic_tokenizer.do_lower_case\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n def get_vocab(self):\n return dict(self.vocab, **self.added_tokens_encoder)\n\n def _tokenize(self, text):\n split_tokens = []\n if self.do_basic_tokenize:\n for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):\n\n # If the token is part of the never_split set\n if token in self.basic_tokenizer.never_split:\n split_tokens.append(token)\n else:\n split_tokens += self.wordpiece_tokenizer.tokenize(token)\n else:\n split_tokens = self.wordpiece_tokenizer.tokenize(text)\n return split_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str) in an id using the vocab. \"\"\"\n return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.ids_to_tokens.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens):\n \"\"\" Converts a sequence of tokens (string) in a single string. \"\"\"\n out_string = \" \".join(tokens).replace(\" ##\", \"\").strip()\n return out_string\n\n def build_inputs_with_special_tokens(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. A BERT sequence has the following format:\n - single sequence: ``[CLS] X ``\n - pair of sequences: ``[CLS] A [SEP] B [SEP]``\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n Returns:\n :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.\n \"\"\"\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` method.\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not the token list is already formatted with special tokens for the model.\n Returns:\n :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError(\n \"You should not supply a second sequence if the provided sequence of \"\n \"ids is already formatted with special tokens for the model.\"\n )\n return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))\n\n if token_ids_1 is not None:\n return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]\n return [1] + ([0] * len(token_ids_0)) + [1]\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence\n pair mask has the following format:\n ::\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n Returns:\n :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given\n sequence(s).\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n index = 0\n if os.path.isdir(save_directory):\n vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n else:\n vocab_file = (filename_prefix + \"-\" if filename_prefix else \"\") + save_directory\n with open(vocab_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: vocabulary indices are not consecutive.\"\n \" Please check that the vocabulary is not corrupted!\".format(vocab_file)\n )\n index = token_index\n writer.write(token + \"\\n\")\n index += 1\n return (vocab_file,)" }, { "identifier": "MIMIC_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class MIMIC_Dataset(Dataset):\n def __init__(self, json_path, csv_path, sty_path,image_res,args):\n self.json_info = json.load(open(json_path,'r'))\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,1:])#40 class for fine-grained query list\n sty_info = pd.read_csv(sty_path)\n self.sty_dict_info = self.csv_to_dict(sty_info)\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n if args.colourjitter:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), 
interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),\n transforms.RandomGrayscale(),\n\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ])\n\n else:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n\n \n def csv_to_dict(self,sty_info):\n tui_list = sty_info.iloc[:,0]\n sty_list = sty_info.iloc[:,1]\n sty_dict = defaultdict(list)\n for idx in tqdm(range(len(tui_list))):\n tui_idx = tui_list[idx]\n sty_idx = sty_list[idx]\n sty_dict[tui_idx] = sty_idx\n return sty_dict\n \n def __len__(self):\n return len(self.img_path_list)\n \n def __getitem__(self, index):\n img_path = self.img_path_list[index].replace(\"/nvme/zhangruipeng/zhangxiaoman/dataset/MIMIC-CXR-DCM/files\", '/remote-home/share/medical/public/MIMIC-CXR-JPG/MIMIC-CXR/small/files')\n class_label = self.class_list[index] \n\n # index_transit = np.load(\"/remote-home/tianjiedai/KAD/R1_CLIP_LR/A1_DATA/small/index0626.npy\")\n # new_index_json = index_transit[index]\n # entities = self.json_info[new_index_json]['entities']\n # captions = self.json_info[new_index_json]['caption']\n \n entities = self.json_info[index]['entities']\n captions = self.json_info[index]['caption']\n\n\n if len(entities) != 0:\n caption_list = ''\n entity_details = ''\n for entity in entities:\n sub_caption = entity['caption']\n sub_entities = entity['entity']#搞错了 还不是list\n sub_entity_details = ''\n for sub_entity in sub_entities:\n try:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n except:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n entity_details = entity_details + sub_entity_details + ' [SEP] '\n caption_list = caption_list + sub_caption + ' [SEP] '\n else:\n caption_list = ''\n entity_details = ''\n for sub_caption in captions:\n caption_list = caption_list + sub_caption + ' [SEP] '\n entity_details = caption_list\n \n # img = open_jpg(img_path).convert('RGB') \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label,\n \"caption\": caption_list,\n \"entity\": entity_details\n }" }, { "identifier": "Mergetrain_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class Mergetrain_Dataset(Dataset):\n def __init__(self, json_path, csv_path, sty_path,image_res,args):\n self.json_info = json.load(open(json_path,'r'))\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,2:])#60 class for fine-grained query list\n self.label_dataset_list = np.asarray(data_info.iloc[:,1])\n\n sty_info = pd.read_csv(sty_path)\n self.sty_dict_info = self.csv_to_dict(sty_info)\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n if args.colourjitter:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n\n 
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),\n transforms.RandomGrayscale(),\n\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ])\n\n else:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n\n \n def csv_to_dict(self,sty_info):\n tui_list = sty_info.iloc[:,0]\n sty_list = sty_info.iloc[:,1]\n sty_dict = defaultdict(list)\n for idx in tqdm(range(len(tui_list))):\n tui_idx = tui_list[idx]\n sty_idx = sty_list[idx]\n sty_dict[tui_idx] = sty_idx\n return sty_dict\n \n def __len__(self):\n return len(self.img_path_list)\n \n def __getitem__(self, index):\n\n if self.label_dataset_list[index] == 0:\n img_path = self.img_path_list[index].replace(\"/nvme/zhangruipeng/zhangxiaoman/dataset/MIMIC-CXR-DCM/files\", '/remote-home/share/medical/public/MIMIC-CXR-JPG/MIMIC-CXR/small/files')\n class_label = self.class_list[index] \n\n # index_transit = np.load(\"/remote-home/tianjiedai/KAD/R1_CLIP_LR/A1_DATA/small/index0626.npy\")\n # new_index_json = index_transit[index]\n # entities = self.json_info[new_index_json]['entities']\n # captions = self.json_info[new_index_json]['caption']\n \n entities = self.json_info[index]['entities']\n captions = self.json_info[index]['caption']\n\n\n if len(entities) != 0:\n caption_list = ''\n entity_details = ''\n for entity in entities:\n sub_caption = entity['caption']\n sub_entities = entity['entity']#搞错了 还不是list\n sub_entity_details = ''\n for sub_entity in sub_entities:\n try:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n except:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n entity_details = entity_details + sub_entity_details + ' [SEP] '\n caption_list = caption_list + sub_caption + ' [SEP] '\n else:\n caption_list = ''\n entity_details = ''\n for sub_caption in captions:\n caption_list = caption_list + sub_caption + ' [SEP] '\n entity_details = caption_list\n \n # img = open_jpg(img_path).convert('RGB') \n # img = Image.open(img_path).convert('RGB') \n # image = self.transform(img)\n # return {\n # \"image\": image,\n # \"label\": class_label,\n # \"caption\": caption_list,\n # \"entity\": entity_details\n # }\n \n else:\n img_path = self.img_path_list[index]\n class_label = self.class_list[index] \n caption_list = ''\n head = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration',\n 'fibrosis',\t'No Finding', 'Pleural Other', 'Support Devices', 'Aortic enlargement',\n 'Clavicle fracture', 'Enlarged PA', 'ILD', 'Lung cavity', 'Lung cyst', 'Mediastinal shift',\t\n 'Nodule/Mass', 'Pulmonary fibrosis', 'Rib fracture', 'Other lesion', 'COPD', 'Lung tumor', 
'Tuberculosis',\n 'Other diseases']\n index_positive = np.where(class_label == 1)\n entity = np.array(head)[index_positive]\n entity_details = ''\n for sub_entity in entity:\n entity_details = entity_details + sub_entity + ' [SEP] '\n\n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n label_dataset = self.label_dataset_list[index]\n\n return {\n \"image\": image,\n \"label\": class_label,\n \"label_dataset\": label_dataset,\n \"caption\": caption_list,\n \"entity\": entity_details\n }" }, { "identifier": "Chestxray14_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class Chestxray14_Dataset(Dataset):\n def __init__(self, csv_path,image_res):\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,3:])\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n self.transform = transforms.Compose([ \n transforms.Resize(image_res, interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ])\n \n def __getitem__(self, index):\n img_path = self.img_path_list[index].replace('/mnt/petrelfs/zhangxiaoman/DATA/Chestxray/ChestXray8/','/remote-home/share/medical/public/ChestXray8/')\n class_label = self.class_list[index] \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label\n }\n \n def __len__(self):\n return len(self.img_path_list)" }, { "identifier": "CheXpert_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class CheXpert_Dataset(Dataset):\n def __init__(self, csv_path,image_res):\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,[13,7,11,10,15]])\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n self.transform = transforms.Compose([ \n transforms.Resize([image_res,image_res], interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ]) \n \n def __getitem__(self, index):\n img_path = os.path.join('/remote-home/share/tianjiedai/',self.img_path_list[index])\n class_label = self.class_list[index] \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label\n }\n \n def __len__(self):\n return len(self.img_path_list)" } ]
import argparse
import os
import logging
import yaml
import numpy as np
import random
import time
import datetime
import json
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import socket
from pathlib import Path
from functools import partial
from sklearn.metrics import roc_auc_score
from collections import OrderedDict
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from transformers import AutoModel,BertConfig,AutoTokenizer
from factory import utils
from scheduler import create_scheduler
from optim import create_optimizer
from engine.train import train,valid_on_cheXpert,valid_on_chestxray14
from models.clip_tqn import CLP_clinical,ModelRes,TQN_Model,TQN_Model_Add,ModelDense,CLP_clinical2
from models.tokenization_bert import BertTokenizer
from dataset.dataset_entity import MIMIC_Dataset,Mergetrain_Dataset, Chestxray14_Dataset,CheXpert_Dataset
from io import BytesIO
16,347
# import ruamel.yaml as yaml

def main(args, config):
    torch.cuda.current_device()
    torch.cuda._initialized = True
    print("Total CUDA devices: ", torch.cuda.device_count())
    torch.set_default_tensor_type('torch.FloatTensor')

    utils.init_distributed_mode(args)
    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_steps = config['schedular']['warmup_epochs']

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    sampler_rank = global_rank
    print('sampler_rank',sampler_rank,'num_tasks',num_tasks)

    #### Dataset ####
    print("Creating dataset")
    if args.add_dataset == True:
# import ruamel.yaml as yaml

def main(args, config):
    torch.cuda.current_device()
    torch.cuda._initialized = True
    print("Total CUDA devices: ", torch.cuda.device_count())
    torch.set_default_tensor_type('torch.FloatTensor')

    utils.init_distributed_mode(args)
    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_steps = config['schedular']['warmup_epochs']

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    sampler_rank = global_rank
    print('sampler_rank',sampler_rank,'num_tasks',num_tasks)

    #### Dataset ####
    print("Creating dataset")
    if args.add_dataset == True:
train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)
14
2023-10-30 00:24:16+00:00
24k
ifrit98/storage-subnet
neurons/miner.py
[ { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strings and then encoding to bytes before hashing.\n\n Parameters:\n - data (bytes | bytearray | object): Data to be hashed.\n\n Returns:\n - int: Integer representation of the SHA3-256 hash of the input data.\n\n Raises:\n - TypeError: If the hashing operation encounters an incompatible data type.\n \"\"\"\n if not isinstance(data, (bytes, bytearray)):\n data_str = str(data)\n data = data_str.encode()\n h = hashlib.sha3_256(data).hexdigest()\n return int(h, 16)" }, { "identifier": "setup_CRS", "path": "storage/shared/ecc.py", "snippet": "def setup_CRS(curve=\"P-256\"):\n \"\"\"\n Generate a pair of random points to serve as a Common Reference String (CRS) for elliptic curve operations.\n\n The CRS is essential for various cryptographic protocols that rely on a shared reference\n between parties, typically for the purpose of ensuring consistent cryptographic operations.\n\n Parameters:\n - curve (str, optional): Name of the elliptic curve to use; defaults to \"P-256\".\n\n Returns:\n - tuple(ECC.EccPoint, ECC.EccPoint): A 2-tuple of ECC.EccPoint instances representing the base points (g, h).\n\n Raises:\n - ValueError: If the specified elliptic curve name is not recognized.\n \"\"\"\n curve_obj = ECC.generate(curve=curve)\n g = curve_obj.pointQ # Base point\n h = ECC.generate(curve=curve).pointQ # Another random point\n return g, h" }, { "identifier": "ECCommitment", "path": "storage/shared/ecc.py", "snippet": "class ECCommitment:\n \"\"\"\n Elliptic Curve based commitment scheme allowing one to commit to a chosen value while keeping it hidden to others.\n\n Attributes:\n g (ECC.EccPoint): The base point of the elliptic curve used as part of the commitment.\n h (ECC.EccPoint): Another random point on the elliptic curve used as part of the commitment.\n\n Methods:\n commit(m): Accepts a message, hashes it, and produces a commitment to the hashed message.\n open(c, m_val, r): Accepts a commitment, a hashed message, and a random value to verify the commitment.\n\n The `commit` method will print the commitment process, and the `open` method will print the verification process.\n \"\"\"\n\n def __init__(self, g, h, verbose=False):\n self.g = g # Base point of the curve\n self.h = h # Another random point on the curve\n self.verbose = verbose\n\n def commit(self, m): # AKA Seal.\n \"\"\"\n Create a cryptographic commitment to a message.\n\n The message is hashed, and the hash is used along with a random number to form the commitment\n using the public parameters g and h. 
The commitment can be verified with the `open` method.\n\n Parameters:\n - m (bytes | bytearray | object): The message to commit to.\n\n Returns:\n - tuple: A 3-tuple (commitment, hashed message value, random number used in the commitment).\n\n Side Effects:\n - This method will print the commitment details to the console.\n\n Raises:\n - Exception: If the commitment calculation fails.\n \"\"\"\n m_val = hash_data(m) # Compute hash of the data\n r = random.randint(1, 2**256)\n c1 = self.g.__mul__(m_val)\n c2 = self.h.__mul__(r)\n c = c1.__add__(c2)\n if self.verbose:\n print(\n f\"Committing: Data = {m}\\nHashed Value = {m_val}\\nRandom Value = {r}\\nComputed Commitment = {c}\\n\"\n )\n return c, m_val, r\n\n def open(self, c, m_val, r):\n \"\"\"\n Verify a commitment using the original message hash and randomness.\n\n This method recomputes the commitment using the public parameters and compares it with\n the provided commitment to check its validity.\n\n Parameters:\n - c (ECC.EccPoint): The commitment point to verify.\n - m_val (int): The integer value of the hashed message used in the commitment.\n - r (int): The random number used in the commitment.\n\n Returns:\n - bool: True if the verification succeeds (commitment is valid), False otherwise.\n\n Side Effects:\n - This method will print the verification details to the console.\n\n Raises:\n - Exception: If the verification calculation fails.\n \"\"\"\n c1 = self.g.__mul__(m_val)\n c2 = self.h.__mul__(r)\n computed_c = c1.__add__(c2)\n if self.verbose:\n print(\n f\"\\nOpening: Hashed Value = {m_val}\\nRandom Value = {r}\\nRecomputed Commitment = {computed_c}\\nOriginal Commitment = {c}\"\n )\n return computed_c == c" }, { "identifier": "ecc_point_to_hex", "path": "storage/shared/ecc.py", "snippet": "def ecc_point_to_hex(point):\n \"\"\"\n Convert an elliptic curve point to a hexadecimal string.\n\n This encoding is typically used for compact representation or for preparing the data\n to be transmitted over protocols that may not support binary data.\n\n Parameters:\n - point (ECC.EccPoint): An ECC point to convert.\n\n Returns:\n - str: Hexadecimal string representing the elliptic curve point.\n\n Raises:\n - AttributeError: If the input is not a valid ECC point with accessible x and y coordinates.\n \"\"\"\n point_str = \"{},{}\".format(point.x, point.y)\n return binascii.hexlify(point_str.encode()).decode()" }, { "identifier": "hex_to_ecc_point", "path": "storage/shared/ecc.py", "snippet": "def hex_to_ecc_point(hex_str, curve):\n \"\"\"\n Convert a hexadecimal string back into an elliptic curve point.\n\n This function is typically used to deserialize an ECC point that has been transmitted or stored as a hex string.\n\n Parameters:\n - hex_str (str): The hex string representing an elliptic curve point.\n - curve (str): The name of the elliptic curve the point belongs to.\n\n Returns:\n - ECC.EccPoint: The elliptic curve point represented by the hex string.\n\n Raises:\n - ValueError: If the hex string is not properly formatted or does not represent a valid point on the specified curve.\n \"\"\"\n point_str = binascii.unhexlify(hex_str).decode()\n x, y = map(int, point_str.split(\",\"))\n return ECC.EccPoint(x, y, curve=curve)" }, { "identifier": "MerkleTree", "path": "storage/shared/merkle.py", "snippet": "class MerkleTree(object):\n \"\"\"\n Represents a Merkle Tree, a data structure used for efficiently summarizing and verifying the\n integrity of large sets of data. 
The Merkle Tree is a binary tree where each leaf node is the hash\n of a data block and every non-leaf node is the hash of its children nodes.\n\n Attributes:\n hash_function (callable): The hash function used for generating hashes of the blocks\n and non-leaf nodes in the Merkle Tree.\n leaves (list): A list where each element is a bytearray representing the hashed value of a leaf.\n levels (list of lists): A list of lists where each sublist represents a level of the tree, starting\n from the leaves up to the root.\n is_ready (bool): Indicates whether the tree has been fully constructed and is ready to provide\n the Merkle root and proofs.\n\n Methods:\n add_leaf(values, do_hash=False): Adds one or multiple leaves to the tree. If `do_hash` is True,\n it will hash the values before adding them as leaves.\n get_leaf(index): Retrieves the hexadecimal string representation of a leaf at the given index.\n get_leaf_count(): Returns the total number of leaves in the tree.\n get_tree_ready_state(): Checks if the tree has been fully constructed.\n make_tree(): Constructs the Merkle Tree from the current leaves. This method must be called\n after all leaves are added and before retrieving the Merkle root or proofs.\n get_merkle_root(): Retrieves the Merkle root as a hexadecimal string if the tree is ready.\n get_proof(index): Generates a proof of inclusion for the leaf at the given index. This proof\n consists of a list of sibling hashes that, when combined with the target leaf,\n can reproduce the Merkle root.\n update_leaf(index, new_value): Updates the value of the leaf at the given index with `new_value`\n and recalculates the hashes up the tree to reflect this change.\n serialize(): Converts the Merkle Tree into a JSON-formatted string for storage or transmission.\n deserialize(json_data, hash_type=\"sha3_256\"): Reconstructs the Merkle Tree from a JSON string,\n using the specified hash function.\n\n Raises:\n Exception: If the `hash_type` provided during initialization is not supported or recognized.\n\n Example:\n # Create a Merkle tree using the SHA3-256 hash function\n merkle_tree = MerkleTree(hash_type='sha3_256')\n\n # Add data blocks (as leaves) to the tree\n merkle_tree.add_leaf(['block1', 'block2', 'block3'], do_hash=True)\n\n # Construct the tree\n merkle_tree.make_tree()\n\n # Retrieve the Merkle root\n root = merkle_tree.get_merkle_root()\n\n # Get proof of inclusion for the first data block\n proof = merkle_tree.get_proof(0)\n\n # Update the value of the first leaf and reconstruct the tree\n merkle_tree.update_leaf(0, 'new_block1_hashed_value')\n merkle_tree.make_tree()\n\n # Serialize the tree for storage\n serialized_tree = merkle_tree.serialize()\n\n # Deserialize the tree for later use\n deserialized_tree = MerkleTree.deserialize(serialized_tree, hash_type='sha3_256')\n\n Note:\n The hash_function attribute is determined by the hash_type parameter provided at initialization.\n Only hash types supported by the `hashlib` library can be used. 
Attempting to use an unsupported\n hash type will result in an exception.\n \"\"\"\n\n def __init__(self, hash_type=\"sha3_256\"):\n hash_type = hash_type.lower()\n if hash_type in [\"sha3_256\"]:\n self.hash_function = getattr(hashlib, hash_type)\n else:\n raise Exception(\"`hash_type` {} nor supported\".format(hash_type))\n\n self.reset_tree()\n\n def __eq__(self, other):\n if not isinstance(other, MerkleTree):\n return False\n return self.serialize() == other.serialize()\n\n def _to_hex(self, x):\n try: # python3\n return x.hex()\n except: # python2\n return binascii.hexlify(x)\n\n def reset_tree(self):\n self.leaves = list()\n self.levels = None\n self.is_ready = False\n\n def add_leaf(self, values, do_hash=False):\n self.is_ready = False\n # check if single leaf\n if not isinstance(values, tuple) and not isinstance(values, list):\n values = [values]\n for v in values:\n if do_hash:\n v = v.encode(\"utf-8\")\n v = self.hash_function(v).hexdigest()\n v = bytearray.fromhex(v)\n self.leaves.append(v)\n\n def get_leaf(self, index):\n return self._to_hex(self.leaves[index])\n\n def get_leaf_count(self):\n return len(self.leaves)\n\n def get_tree_ready_state(self):\n return self.is_ready\n\n def _calculate_next_level(self):\n solo_leave = None\n N = len(self.levels[0]) # number of leaves on the level\n if N % 2 == 1: # if odd number of leaves on the level\n solo_leave = self.levels[0][-1]\n N -= 1\n\n new_level = []\n for l, r in zip(self.levels[0][0:N:2], self.levels[0][1:N:2]):\n new_level.append(self.hash_function(l + r).digest())\n if solo_leave is not None:\n new_level.append(solo_leave)\n self.levels = [\n new_level,\n ] + self.levels # prepend new level\n\n def make_tree(self):\n \"\"\"\n Constructs the Merkle Tree from the leaves that have been added.\n\n This must be called after adding all the leaves and before calling\n get_merkle_root or get_proof to ensure the tree is constructed.\n \"\"\"\n self.is_ready = False\n if self.get_leaf_count() > 0:\n self.levels = [\n self.leaves,\n ]\n while len(self.levels[0]) > 1:\n self._calculate_next_level()\n self.is_ready = True\n\n def get_merkle_root(self):\n if self.is_ready:\n if self.levels is not None:\n return self._to_hex(self.levels[0][0])\n else:\n return None\n else:\n return None\n\n def get_proof(self, index):\n \"\"\"\n Generates the proof for the existence of a leaf at the specified index within the Merkle Tree.\n\n A Merkle proof is a collection of sibling hashes on the path from a leaf to the root of the tree.\n This proof can be used to independently verify that a leaf is indeed part of the Merkle tree without\n needing the entire tree. Each element of the proof shows the direction ('left' or 'right') and the\n corresponding hash that pairs with the path to the root.\n\n Parameters:\n index (int): The index of the target leaf for which to generate the Merkle proof. The index must\n correspond to the position of the leaf in the original list of leaves when the tree\n was constructed.\n\n Returns:\n list of dicts: A list where each dictionary contains a single key-value pair. The key is either\n 'left' or 'right', indicating the side of the sibling hash, and the value is a\n string representing the hexadecimal hash value of the sibling. 
If the tree is not\n ready or the index is out of bounds, None is returned.\n\n Raises:\n IndexError: If the index provided is not within the range of the leaves in the tree.\n ValueError: If the tree has not been constructed by calling `make_tree` method, or the index\n is not an integer.\n\n Example:\n # Assuming `merkle_tree` is an instance of `MerkleTree` and has been populated with leaves and made ready\n proof = merkle_tree.get_proof(2)\n print(proof) # Outputs something like [{'left': 'abcd...'}, {'right': 'ef01...'}]\n\n Note:\n The Merkle proof is only valid if the tree is in the ready state (`is_ready` attribute is True),\n which occurs after the `make_tree` method has been called. If the tree is not ready or the index\n is not valid, the method will return None.\n \"\"\"\n if self.levels is None:\n return None\n elif not self.is_ready or index > len(self.leaves) - 1 or index < 0:\n return None\n else:\n proof = []\n for x in range(len(self.levels) - 1, 0, -1):\n level_len = len(self.levels[x])\n if (index == level_len - 1) and (\n level_len % 2 == 1\n ): # skip if this is an odd end node\n index = int(index / 2.0)\n continue\n is_right_node = index % 2\n sibling_index = index - 1 if is_right_node else index + 1\n sibling_pos = \"left\" if is_right_node else \"right\"\n sibling_value = self._to_hex(self.levels[x][sibling_index])\n proof.append({sibling_pos: sibling_value})\n index = int(index / 2.0)\n return proof\n\n def update_leaf(self, index, new_value):\n \"\"\"\n Updates the value of a leaf at a given index in the Merkle Tree and recalculates the hashes along\n the path from the updated leaf to the root of the tree to reflect the change.\n\n This method allows the Merkle Tree to maintain integrity by ensuring that any updates to the leaf\n nodes are propagated upwards, resulting in a new Merkle root that represents the current state of\n the leaves.\n\n Parameters:\n index (int): The index of the leaf to update. The index is zero-based and must be less than\n the number of leaves in the tree.\n new_value (str): The new value in hexadecimal format to which the leaf should be updated. This\n value should be a valid hexadecimal string that represents the hashed data\n if hashing was applied to the leaves upon tree construction.\n\n Returns:\n None\n\n Raises:\n ValueError: If the tree is not ready for updates (i.e., `is_ready` is False), if the index is\n not an integer, if the new_value is not a hexadecimal string, or if the index is\n out of bounds (less than 0 or greater than or equal to the number of leaves).\n IndexError: If the index is out of the range of current leaves.\n\n Example:\n # Assuming `merkle_tree` is an instance of `MerkleTree`, populated with leaves and made ready.\n merkle_tree.update_leaf(0, 'a1b2c3d4e5f67890')\n # The leaf at index 0 is updated, and changes are propagated to the root.\n\n Note:\n The tree must have been constructed and be in a ready state before calling this method. 
If the\n tree has not been made by calling the `make_tree` method, or the index is invalid, this method\n will not perform an update and will return None.\n \"\"\"\n if not self.is_ready:\n return None\n new_value = bytearray.fromhex(new_value)\n self.levels[-1][index] = new_value\n for x in range(len(self.levels) - 1, 0, -1):\n parent_index = index // 2\n left_child = self.levels[x][parent_index * 2]\n try:\n right_child = self.levels[x][parent_index * 2 + 1]\n except IndexError:\n right_child = bytearray()\n self.levels[x - 1][parent_index] = self.hash_function(\n left_child + right_child\n ).digest()\n index = parent_index\n\n def serialize(self):\n \"\"\"\n Serializes the MerkleTree object into a JSON string.\n \"\"\"\n # Convert the bytearray leaves and levels to hex strings for serialization\n leaves = [self._to_hex(leaf) for leaf in self.leaves]\n levels = None\n if self.levels is not None:\n levels = []\n for level in self.levels:\n levels.append([self._to_hex(item) for item in level])\n\n # Construct a dictionary with the MerkleTree properties\n merkle_tree_data = {\n \"leaves\": leaves,\n \"levels\": levels,\n \"is_ready\": self.is_ready,\n }\n\n # Convert the dictionary to a JSON string\n return json.dumps(merkle_tree_data)\n\n @classmethod\n def deserialize(cls, json_data, hash_type=\"sha3_256\"):\n \"\"\"\n Deserializes the JSON string into a MerkleTree object.\n \"\"\"\n # Convert the JSON string back to a dictionary\n merkle_tree_data = json.loads(json_data)\n\n # Create a new MerkleTree object\n m_tree = cls(hash_type)\n\n # Convert the hex strings back to bytearrays and set the leaves and levels\n m_tree.leaves = [bytearray.fromhex(leaf) for leaf in merkle_tree_data[\"leaves\"]]\n if merkle_tree_data[\"levels\"] is not None:\n m_tree.levels = []\n for level in merkle_tree_data[\"levels\"]:\n m_tree.levels.append([bytearray.fromhex(item) for item in level])\n m_tree.is_ready = merkle_tree_data[\"is_ready\"]\n\n return m_tree" }, { "identifier": "b64_encode", "path": "storage/shared/utils.py", "snippet": "def b64_encode(data: Union[bytes, str, List[str], List[bytes], dict]) -> str:\n \"\"\"\n Encodes the given data into a base64 string. If the data is a list or dictionary of bytes, it converts\n the bytes into hexadecimal strings before encoding.\n\n Args:\n data (list or dict): The data to be base64 encoded. Can be a list of bytes or a dictionary with bytes values.\n\n Returns:\n str: The base64 encoded string of the input data.\n\n Raises:\n TypeError: If the input is not a list, dict, or bytes.\n \"\"\"\n if isinstance(data, bytes):\n data = data.hex()\n if isinstance(data, list) and len(data) and isinstance(data[0], bytes):\n data = [d.hex() for d in data]\n if isinstance(data, dict) and isinstance(data[list(data.keys())[0]], bytes):\n data = {k: v.hex() for k, v in data.items()}\n return base64.b64encode(json.dumps(data).encode()).decode(\"utf-8\")" }, { "identifier": "b64_decode", "path": "storage/shared/utils.py", "snippet": "def b64_decode(data: bytes, decode_hex: bool = False, encrypted: bool = False):\n \"\"\"\n Decodes a base64 string into a list or dictionary. If decode_hex is True, it converts any hexadecimal strings\n within the data back into bytes.\n\n Args:\n data (bytes or str): The base64 encoded data to be decoded.\n decode_hex (bool): A flag to indicate whether to decode hex strings into bytes. Defaults to False.\n\n Returns:\n list or dict: The decoded data. 
Returns a list if the original encoded data was a list, and a dict if it was a dict.\n\n Raises:\n ValueError: If the input is not properly base64 encoded or if hex decoding fails.\n \"\"\"\n data = data.decode(\"utf-8\") if isinstance(data, bytes) else data\n decoded_data = json.loads(\n base64.b64decode(data) if encrypted else base64.b64decode(data).decode(\"utf-8\")\n )\n if decode_hex:\n try:\n decoded_data = (\n [bytes.fromhex(d) for d in decoded_data]\n if isinstance(decoded_data, list)\n else {k: bytes.fromhex(v) for k, v in decoded_data.items()}\n )\n except:\n pass\n return decoded_data" }, { "identifier": "chunk_data", "path": "storage/shared/utils.py", "snippet": "def chunk_data(data: bytes, chunksize: int) -> List[bytes]:\n \"\"\"\n Generator function that chunks the given data into pieces of a specified size.\n\n Args:\n data (bytes): The binary data to be chunked.\n chunksize (int): The size of each chunk in bytes.\n\n Yields:\n bytes: A chunk of the data with the size equal to 'chunksize' or the remaining size of data.\n\n Raises:\n ValueError: If 'chunksize' is less than or equal to 0.\n \"\"\"\n for i in range(0, len(data), chunksize):\n yield data[i : i + chunksize]" }, { "identifier": "safe_key_search", "path": "storage/shared/utils.py", "snippet": "async def safe_key_search(database: aioredis.Redis, pattern: str) -> List[str]:\n \"\"\"\n Safely search for keys in the database that doesn't block.\n `scan_iter` uses cursor under the hood.\n \"\"\"\n return [key for key in await database.scan_iter(pattern)]" }, { "identifier": "run", "path": "storage/miner/run.py", "snippet": "def run(self):\n \"\"\"\n Initiates and manages the main loop for the miner on the Bittensor network.\n\n This function performs the following primary tasks:\n 1. Check for registration on the Bittensor network.\n 2. Attaches the miner's forward, blacklist, and priority functions to its axon.\n 3. Starts the miner's axon, making it active on the network.\n 4. Regularly updates the metagraph with the latest network state.\n 5. Optionally sets weights on the network, defining how much trust to assign to other nodes.\n 6. Handles graceful shutdown on keyboard interrupts and logs unforeseen errors.\n\n The miner continues its operations until `should_exit` is set to True or an external interruption occurs.\n During each epoch of its operation, the miner waits for new blocks on the Bittensor network, updates its\n knowledge of the network (metagraph), and sets its weights. 
This process ensures the miner remains active\n and up-to-date with the network's latest state.\n\n Note:\n - The function leverages the global configurations set during the initialization of the miner.\n - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests.\n\n Raises:\n KeyboardInterrupt: If the miner is stopped by a manual interruption.\n Exception: For unforeseen errors during the miner's operation, which are logged for diagnosis.\n \"\"\"\n block_handler_substrate = SubstrateInterface(\n ss58_format=bt.__ss58_format__,\n use_remote_preset=True,\n url=self.subtensor.chain_endpoint,\n type_registry=bt.__type_registry__,\n )\n\n netuid = self.config.netuid\n\n # --- Check for registration.\n if not self.subtensor.is_hotkey_registered(\n netuid=netuid,\n hotkey_ss58=self.wallet.hotkey.ss58_address,\n ):\n bt.logging.error(\n f\"Wallet: {self.wallet} is not registered on netuid {netuid}\"\n f\"Please register the hotkey using `btcli subnets register` before trying again\"\n )\n exit()\n\n tempo = block_handler_substrate.query(\n module=\"SubtensorModule\", storage_function=\"Tempo\", params=[netuid]\n ).value\n\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n should_retry = False\n\n def handler(obj, update_nr, subscription_id):\n current_block = obj[\"header\"][\"number\"]\n block_hash = block_handler_substrate.get_block_hash(current_block)\n bt.logging.debug(f\"New block #{current_block}\")\n\n bt.logging.debug(\n f\"Blocks since epoch: {(current_block + netuid + 1) % (tempo + 1)}\"\n )\n\n nonlocal last_extrinsic_hash\n nonlocal checked_extrinsics_count\n nonlocal should_retry\n\n if last_extrinsic_hash != None:\n try:\n receipt = block_handler_substrate.retrieve_extrinsic_by_hash(\n block_hash, last_extrinsic_hash\n )\n bt.logging.debug(\n f\"Last set-weights call: {'Success' if receipt.is_success else format('Failure, reason: %s', receipt.error_message['name'] if receipt.error_message != None else 'nil')}\"\n )\n\n should_retry = False\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n except Exception as e:\n checked_extrinsics_count += 1\n bt.logging.debug(f\"An error occurred, extrinsic not found in block.\")\n finally:\n if checked_extrinsics_count >= 20:\n should_retry = True\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n\n if ((current_block + netuid + 1) % (tempo + 1) == 0) or should_retry:\n bt.logging.info(\n f\"New epoch started, setting weights at block {current_block}\"\n )\n with self.subtensor.substrate as substrate:\n call = substrate.compose_call(\n call_module=\"SubtensorModule\",\n call_function=\"set_weights\",\n call_params={\n \"dests\": [self.my_subnet_uid],\n \"weights\": [65535],\n \"netuid\": netuid,\n \"version_key\": 1,\n },\n )\n\n # Period dictates how long the extrinsic will stay as part of waiting pool\n extrinsic = substrate.create_signed_extrinsic(\n call=call, keypair=self.wallet.hotkey, era={\"period\": 1000}\n )\n\n dry_run = runtime_call(\n substrate=substrate,\n api=\"TaggedTransactionQueue\",\n method=\"validate_transaction\",\n params=[\"InBlock\", extrinsic, block_hash],\n block_hash=block_hash,\n )\n bt.logging.debug(dry_run)\n\n response = substrate.submit_extrinsic(\n extrinsic,\n wait_for_inclusion=False,\n wait_for_finalization=False,\n )\n\n result_data = substrate.rpc_request(\"author_pendingExtrinsics\", [])\n for extrinsic_data in result_data[\"result\"]:\n extrinsic = substrate.runtime_config.create_scale_object(\n \"Extrinsic\", 
metadata=substrate.metadata\n )\n extrinsic.decode(\n ScaleBytes(extrinsic_data),\n check_remaining=substrate.config.get(\"strict_scale_decode\"),\n )\n\n if extrinsic.value[\"extrinsic_hash\"] == response.extrinsic_hash:\n bt.logging.debug(\n \"Weights transaction is in the pending transaction pool\"\n )\n\n last_extrinsic_hash = response.extrinsic_hash\n should_retry = False\n\n # --- Update the miner storage information periodically.\n if not should_retry:\n update_storage_stats(self)\n bt.logging.debug(\"Storage statistics updated...\")\n\n if self.should_exit:\n return True\n\n block_handler_substrate.subscribe_block_headers(handler)" }, { "identifier": "set_weights", "path": "storage/miner/set_weights.py", "snippet": "def set_weights_for_miner(\n subtensor: \"bt.subtensor\",\n netuid: int,\n uid: int,\n wallet: \"bt.wallet\",\n metagraph: \"bt.metagraph\",\n wandb_on: bool = False,\n tempo: int = 360,\n wait_for_inclusion: bool = False,\n wait_for_finalization: bool = False,\n) -> bool:" }, { "identifier": "compute_subsequent_commitment", "path": "storage/miner/utils.py", "snippet": "def compute_subsequent_commitment(data, previous_seed, new_seed, verbose=False):\n \"\"\"\n Computes a new commitment based on provided data and a change from an old seed to a new seed.\n This function is typically used in cryptographic operations to update commitments without\n altering the underlying data.\n\n Parameters:\n - data: The original data for which the commitment is being updated.\n - previous_seed: The seed used in the previous commitment.\n - new_seed: The seed to be used for the new commitment.\n - verbose (bool): If True, additional debug information will be printed. Defaults to False.\n\n Returns:\n - A tuple containing the new commitment and the proof of the old commitment.\n\n If verbose is set to True, debug information about the types and contents of the parameters\n will be printed to aid in debugging.\n \"\"\"\n if verbose:\n bt.logging.debug(\"IN COMPUTE SUBESEQUENT COMMITMENT\")\n bt.logging.debug(\"type of data :\", type(data))\n bt.logging.debug(\"type of prev_seed:\", type(previous_seed))\n bt.logging.debug(\"type of new_seed :\", type(new_seed))\n proof = hash_data(data + previous_seed)\n return hash_data(str(proof).encode(\"utf-8\") + new_seed), proof" }, { "identifier": "save_data_to_filesystem", "path": "storage/miner/utils.py", "snippet": "def save_data_to_filesystem(data, directory, filename):\n \"\"\"\n Saves data to the filesystem at the specified directory and filename. 
If the directory does\n not exist, it is created.\n\n Parameters:\n - data: The data to be saved.\n - directory (str): The directory path where the data should be saved.\n - filename (str): The name of the file to save the data in.\n\n Returns:\n - file_path (str): The full path to the saved file.\n\n This function is useful for persisting data to the disk.\n \"\"\"\n # Ensure the directory exists\n directory = os.path.expanduser(directory)\n os.makedirs(directory, exist_ok=True)\n file_path = os.path.join(directory, filename)\n with open(file_path, \"wb\") as file:\n file.write(data)\n return file_path" }, { "identifier": "load_from_filesystem", "path": "storage/miner/utils.py", "snippet": "def load_from_filesystem(filepath):\n \"\"\"\n Loads data from a file in the filesystem.\n\n Parameters:\n - filepath (str): The path to the file from which data is to be loaded.\n\n Returns:\n - data: The data read from the file.\n\n This function is a straightforward utility for reading binary data from a file.\n \"\"\"\n with open(os.path.expanduser(filepath), \"rb\") as file:\n data = file.read()\n return data" }, { "identifier": "commit_data_with_seed", "path": "storage/miner/utils.py", "snippet": "def commit_data_with_seed(committer, data_chunks, n_chunks, seed):\n \"\"\"\n Commits chunks of data with a seed using a Merkle tree structure to create a proof of\n integrity for each chunk. This function is used in environments where the integrity\n and order of data need to be verifiable.\n\n Parameters:\n - committer: The committing object, which should have a commit method.\n - data_chunks (list): A list of data chunks to be committed.\n - n_chunks (int): The number of chunks expected to be committed.\n - seed: A seed value that is combined with data chunks before commitment.\n\n Returns:\n - randomness (list): A list of randomness values associated with each data chunk's commitment.\n - chunks (list): The list of original data chunks that were committed.\n - points (list): A list of commitment points in hex format.\n - merkle_tree (MerkleTree): A Merkle tree constructed from the commitment points.\n\n This function handles the conversion of commitment points to hex format and adds them to the\n Merkle tree. 
The completed tree represents the combined commitments.\n \"\"\"\n merkle_tree = MerkleTree()\n\n # Commit each chunk of data\n randomness, chunks, points = [None] * n_chunks, [None] * n_chunks, [None] * n_chunks\n for index, chunk in enumerate(data_chunks):\n c, m_val, r = committer.commit(chunk + str(seed).encode())\n c_hex = ecc_point_to_hex(c)\n randomness[index] = r\n chunks[index] = chunk\n points[index] = c_hex\n merkle_tree.add_leaf(c_hex)\n\n # Create the tree from the leaves\n merkle_tree.make_tree()\n return randomness, chunks, points, merkle_tree" }, { "identifier": "init_wandb", "path": "storage/miner/utils.py", "snippet": "def init_wandb(self, reinit=False):\n \"\"\"Starts a new wandb run.\"\"\"\n tags = [\n self.wallet.hotkey.ss58_address,\n storage.__version__,\n str(storage.__spec_version__),\n f\"netuid_{self.metagraph.netuid}\",\n ]\n\n if self.config.mock:\n tags.append(\"mock\")\n\n wandb_config = {\n key: copy.deepcopy(self.config.get(key, None))\n for key in (\"neuron\", \"reward\", \"netuid\", \"wandb\")\n }\n\n if wandb_config[\"neuron\"] is not None:\n wandb_config[\"neuron\"].pop(\"full_path\", None)\n\n self.wandb = wandb.init(\n anonymous=\"allow\",\n reinit=reinit,\n project=self.config.wandb.project_name,\n entity=self.config.wandb.entity,\n config=wandb_config,\n mode=\"offline\" if self.config.wandb.offline else \"online\",\n dir=self.config.neuron.full_path\n if self.config.neuron is not None\n else \"wandb_logs\",\n tags=tags,\n notes=self.config.wandb.notes,\n )\n bt.logging.success(\n prefix=\"Started a new wandb run\",\n sufix=f\"<blue> {self.wandb.name} </blue>\",\n )" }, { "identifier": "get_directory_size", "path": "storage/miner/utils.py", "snippet": "def get_directory_size(path):\n \"\"\"\n Calculates the total size of files in a specified directory.\n\n This function traverses the directory at the given path, including all subdirectories, and sums up the size\n of each file to calculate the total directory size.\n\n Args:\n path (str): The file path of the directory whose size is to be calculated.\n\n Returns:\n int: The total size of the directory in bytes (B).\n\n Usage:\n directory_size_gb = get_directory_size('/path/to/directory')\n \"\"\"\n total_size = 0\n path = os.path.expanduser(path)\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n if not os.path.islink(fp):\n total_size += os.path.getsize(fp)\n return total_size" }, { "identifier": "get_free_disk_space", "path": "storage/miner/utils.py", "snippet": "def get_free_disk_space(path=\".\"):\n \"\"\"\n Retrieves the free disk space for the drive containing the specified path.\n\n This function provides the free disk space of the drive on which the specified path resides.\n It's useful for understanding the storage capacity and usage of the system where the miner is running.\n\n Args:\n path (str): A file path on the drive whose free disk space is to be fetched. 
Typically, you can\n provide the root path ('/') to get the stats for the primary drive.\n\n Returns:\n int: The free space on the disk in bytes (B).\n\n Usage:\n free_disk_space_gb = get_free_disk_space('/')\n \"\"\"\n stats = get_disk_space_stats(path)\n free = stats.get(\"free_bytes\", 0)\n return free" }, { "identifier": "update_storage_stats", "path": "storage/miner/utils.py", "snippet": "def update_storage_stats(self):\n \"\"\"\n Updates the miner's storage statistics.\n\n This function updates the miner's storage statistics, including the free disk space, current storage usage,\n and percent disk usage. It's useful for understanding the storage capacity and usage of the system where\n the miner is running.\n \"\"\"\n\n self.free_memory = get_free_disk_space()\n bt.logging.info(f\"Free memory: {self.free_memory} bytes\")\n self.current_storage_usage = get_directory_size(self.config.database.directory)\n bt.logging.info(f\"Miner storage usage: {self.current_storage_usage} bytes\")\n self.percent_disk_usage = self.current_storage_usage / self.free_memory\n bt.logging.info(f\"Miner % disk usage : {100 * self.percent_disk_usage:.3f}%\")" }, { "identifier": "config", "path": "storage/miner/config.py", "snippet": "def config(cls):\n parser = argparse.ArgumentParser()\n bt.subtensor.add_args(parser)\n bt.logging.add_args(parser)\n bt.wallet.add_args(parser)\n bt.axon.add_args(parser)\n cls.add_args(parser)\n return bt.config(parser)" }, { "identifier": "check_config", "path": "storage/miner/config.py", "snippet": "def check_config(cls, config: \"bt.Config\"):\n r\"\"\"Checks/validates the config namespace object.\"\"\"\n bt.logging.check_config(config)\n\n if config.mock:\n config.wallet._mock = True\n\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n full_path = os.path.expanduser(\n \"{}/{}/{}/netuid{}/{}\".format(\n config.logging.logging_dir,\n config.wallet.name,\n config.wallet.hotkey,\n config.netuid,\n config.miner.name,\n )\n )\n log_path = os.path.join(full_path, \"logs\", timestamp)\n\n config.miner.log_path = os.path.expanduser(log_path)\n config.miner.full_path = os.path.expanduser(full_path)\n\n if not os.path.exists(config.miner.full_path):\n os.makedirs(config.miner.full_path, exist_ok=True)\n if not os.path.exists(config.miner.log_path):\n os.makedirs(config.miner.log_path, exist_ok=True)\n\n if not config.miner.dont_save_events:\n # Add custom event logger for the events.\n logger.level(\"EVENTS\", no=38, icon=\"📝\")\n logger.add(\n config.miner.full_path + \"/\" + \"EVENTS.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"EVENTS\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"INFO.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"INFO\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"DEBUG.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"DEBUG\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"TRACE.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"TRACE\",\n format=\"{time:YYYY-MM-DD at 
HH:mm:ss} | {level} | {message}\",\n )" }, { "identifier": "add_args", "path": "storage/miner/config.py", "snippet": "def add_args(cls, parser):\n parser.add_argument(\"--netuid\", type=int, default=21, help=\"The chain subnet uid.\")\n parser.add_argument(\"--test\", default=False, action=\"store_true\")\n parser.add_argument(\n \"--miner.name\",\n type=str,\n help=\"Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name. \",\n default=\"core_storage_miner\",\n )\n parser.add_argument(\n \"--miner.device\",\n type=str,\n help=\"Device to run the validator on.\",\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n )\n parser.add_argument(\"--miner.verbose\", default=False, action=\"store_true\")\n\n parser.add_argument(\n \"--database.host\", default=\"localhost\", help=\"The host of the redis database.\"\n )\n parser.add_argument(\n \"--database.port\",\n type=int,\n default=6379,\n help=\"The port of the redis database.\",\n )\n parser.add_argument(\n \"--database.index\",\n type=int,\n default=0,\n help=\"The index of the redis database.\",\n )\n parser.add_argument(\n \"--database.directory\",\n default=\"~/.data\",\n help=\"The directory to store data in.\",\n )\n\n # Run config.\n parser.add_argument(\n \"--miner.set_weights_wait_for_inclusion\",\n action=\"store_true\",\n help=\"Wether to wait for the set_weights extrinsic to enter a block\",\n default=False,\n )\n parser.add_argument(\n \"--miner.set_weights_wait_for_finalization\",\n action=\"store_true\",\n help=\"Wether to wait for the set_weights extrinsic to be finalized on the chain\",\n default=False,\n )\n parser.add_argument(\n \"--miner.seconds_to_wait_to_log_presence_message\",\n type=int,\n help=\"How many seconds to wait before logging a presence message.\",\n default=4,\n )\n\n # Blacklist.\n parser.add_argument(\n \"--miner.blacklist.blacklist\",\n type=str,\n required=False,\n nargs=\"*\",\n help=\"Blacklist certain hotkeys\",\n default=[],\n )\n parser.add_argument(\n \"--miner.blacklist.whitelist\",\n type=str,\n required=False,\n nargs=\"*\",\n help=\"Whitelist certain hotkeys\",\n default=[],\n )\n parser.add_argument(\n \"--miner.blacklist.force_validator_permit\",\n action=\"store_true\",\n help=\"Only allow requests from validators\",\n default=False,\n )\n parser.add_argument(\n \"--miner.blacklist.allow_non_registered\",\n action=\"store_true\",\n help=\"If True, the miner will allow non-registered hotkeys to mine.\",\n default=False,\n )\n parser.add_argument(\n \"--miner.blacklist.minimum_stake_requirement\",\n type=float,\n help=\"Minimum stake requirement\",\n default=0.0,\n )\n parser.add_argument(\n \"--miner.blacklist.min_request_period\",\n type=int,\n help=\"Time period (in minute) to serve a maximum of 50 requests for each hotkey\",\n default=5,\n )\n\n # Priority.\n parser.add_argument(\n \"--miner.priority.default\",\n type=float,\n help=\"Default priority of non-registered requests\",\n default=0.0,\n )\n parser.add_argument(\n \"--miner.priority.time_stake_multiplicate\",\n type=int,\n help=\"Time (in minute) it takes to make the stake twice more important in the priority queue\",\n default=10,\n )\n parser.add_argument(\n \"--miner.priority.len_request_timestamps\",\n type=int,\n help=\"Number of historic request timestamps to record\",\n default=50,\n )\n # Switches.\n parser.add_argument(\n \"--miner.no_set_weights\",\n action=\"store_true\",\n help=\"If True, the miner does not set weights.\",\n default=False,\n )\n parser.add_argument(\n 
\"--miner.no_serve\",\n action=\"store_true\",\n help=\"If True, the miner doesnt serve the axon.\",\n default=False,\n )\n parser.add_argument(\n \"--miner.no_start_axon\",\n action=\"store_true\",\n help=\"If True, the miner doesnt start the axon.\",\n default=False,\n )\n\n # Mocks.\n parser.add_argument(\n \"--miner.mock_subtensor\",\n action=\"store_true\",\n help=\"If True, the miner will allow non-registered hotkeys to mine.\",\n default=False,\n )\n\n # Wandb args\n parser.add_argument(\n \"--wandb.off\", action=\"store_true\", help=\"Turn off wandb.\", default=False\n )\n parser.add_argument(\n \"--wandb.project_name\",\n type=str,\n help=\"The name of the project where you are sending the new run.\",\n default=\"philanthropic-thunder\",\n )\n parser.add_argument(\n \"--wandb.entity\",\n type=str,\n help=\"An entity is a username or team name where youre sending runs.\",\n default=\"philanthrope\",\n )\n parser.add_argument(\n \"--wandb.offline\",\n action=\"store_true\",\n help=\"Runs wandb in offline mode.\",\n default=False,\n )\n parser.add_argument(\n \"--wandb.weights_step_length\",\n type=int,\n help=\"How many steps before we log the weights.\",\n default=10,\n )\n parser.add_argument(\n \"--wandb.run_step_length\",\n type=int,\n help=\"How many steps before we rollover to a new run.\",\n default=1500,\n )\n parser.add_argument(\n \"--wandb.notes\",\n type=str,\n help=\"Notes to add to the wandb run.\",\n default=\"\",\n )" }, { "identifier": "store_chunk_metadata", "path": "storage/miner/database.py", "snippet": "async def store_chunk_metadata(r, chunk_hash, filepath, hotkey, size, seed):\n \"\"\"\n Stores the metadata of a chunk in a Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n hotkey (str): Miner hotkey associated with the chunk.\n size (int): The size of the chunk.\n seed (str): The seed associated with the chunk.\n\n This function stores the filepath, size (as a string), and seed for the given chunk hash.\n \"\"\"\n # Ensure that all data are in the correct format\n metadata = {\n \"filepath\": filepath,\n \"hotkey\": hotkey,\n \"size\": str(size), # Convert size to string\n \"seed\": seed, # Store seed directly\n }\n\n # Use hmset (or hset which is its modern equivalent) to store the hash\n for key, value in metadata.items():\n await r.hset(chunk_hash, key, value)" }, { "identifier": "update_seed_info", "path": "storage/miner/database.py", "snippet": "async def update_seed_info(r, chunk_hash, hotkey, seed):\n \"\"\"\n Updates the seed information for a specific chunk in the Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n hotkey (str): The caller hotkey value to be updated.\n seed (str): The new seed value to be updated.\n\n This function updates the seed information for the specified chunk hash.\n \"\"\"\n # Update the existing seed information\n await r.hset(chunk_hash, \"seed\", seed)\n await r.hset(chunk_hash, \"hotkey\", hotkey)" }, { "identifier": "get_chunk_metadata", "path": "storage/miner/database.py", "snippet": "async def get_chunk_metadata(r, chunk_hash):\n \"\"\"\n Retrieves the metadata for a specific chunk from the Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n\n Returns:\n dict: A dictionary containing the chunk's metadata, including filepath, size, and seed.\n Size is converted to an 
integer, and seed is decoded from bytes to a string.\n \"\"\"\n metadata = await r.hgetall(chunk_hash)\n if metadata:\n metadata[b\"size\"] = int(metadata[b\"size\"])\n metadata[b\"seed\"] = metadata[b\"seed\"].decode(\"utf-8\")\n return metadata" } ]
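The shared-utility snippets listed above (b64_encode, b64_decode, chunk_data) describe a hex-then-JSON-then-base64 encoding for lists of byte chunks. Below is a minimal round-trip sketch in Python; it assumes the repository's `storage` package is importable under the same paths the snippets show, and the payload bytes are made up for illustration.

```python
# Hedged sketch: relies only on the behaviour documented in the
# b64_encode / b64_decode / chunk_data snippets above.
from storage.shared.utils import b64_encode, b64_decode, chunk_data

data = b"hello world" * 100                      # illustrative payload
chunks = list(chunk_data(data, 256))             # generator of <=256-byte slices

encoded = b64_encode(chunks)                     # bytes -> hex strings -> JSON -> base64 string
decoded = b64_decode(encoded, decode_hex=True)   # hex strings converted back to bytes

assert b"".join(decoded) == data                 # lossless round trip
```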
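The partial MerkleTree snippet at the top of the list also documents serialize and deserialize. A short, hedged round-trip sketch follows; the hex leaf values are invented, and they are added one at a time the same way commit_data_with_seed adds commitment points.

```python
# Hedged sketch: add_leaf / make_tree / serialize / deserialize usage is taken
# from the MerkleTree and commit_data_with_seed snippets above.
from storage.shared.merkle import MerkleTree

tree = MerkleTree()
for leaf_hex in ["a1b2c3", "d4e5f6", "0a0b0c"]:   # illustrative hex leaves
    tree.add_leaf(leaf_hex)
tree.make_tree()

blob = tree.serialize()                   # JSON string with leaves, levels, is_ready
restored = MerkleTree.deserialize(blob)   # hex strings converted back to bytearrays
assert restored.serialize() == blob       # lossless round trip
```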
import os import sys import copy import json import time import torch import typing import base64 import asyncio import aioredis import argparse import threading import traceback import bittensor as bt import storage from collections import defaultdict from Crypto.Random import get_random_bytes from typing import Dict from pprint import pprint, pformat from storage.shared.ecc import ( hash_data, setup_CRS, ECCommitment, ecc_point_to_hex, hex_to_ecc_point, ) from storage.shared.merkle import ( MerkleTree, ) from storage.shared.utils import b64_encode, b64_decode, chunk_data, safe_key_search from storage.miner import ( run, set_weights, ) from storage.miner.utils import ( compute_subsequent_commitment, save_data_to_filesystem, load_from_filesystem, commit_data_with_seed, init_wandb, get_directory_size, get_free_disk_space, update_storage_stats, ) from storage.miner.config import ( config, check_config, add_args, ) from storage.miner.database import ( store_chunk_metadata, update_seed_info, get_chunk_metadata, )
14,988
def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. 
The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import this repo class miner: @classmethod def check_config(cls, config: "bt.Config"): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ check_config(cls, config) @classmethod def add_args(cls, parser): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ add_args(cls, parser) @classmethod def config(cls): """ Retrieves the configuration for the neuron. Returns: bt.Config: The configuration object for the neuron. This class method returns the neuron's configuration, which is used throughout the neuron's lifecycle for various functionalities and operations. """ return config(cls) subtensor: "bt.subtensor" wallet: "bt.wallet" metagraph: "bt.metagraph" def __init__(self): self.config = miner.config() self.check_config(self.config) bt.logging(config=self.config, logging_dir=self.config.miner.full_path) bt.logging.info(f"{self.config}") bt.logging.info("miner.__init__()") # Init device. bt.logging.debug("loading device") self.device = torch.device(self.config.miner.device) bt.logging.debug(str(self.device)) # Init subtensor bt.logging.debug("loading subtensor") self.subtensor = bt.subtensor(config=self.config) bt.logging.debug(str(self.subtensor)) self.current_block = self.subtensor.get_current_block() # Init wallet. bt.logging.debug("loading wallet") self.wallet = bt.wallet(config=self.config) self.wallet.create_if_non_existent() if not self.config.wallet._mock: if not self.subtensor.is_hotkey_registered_on_subnet( hotkey_ss58=self.wallet.hotkey.ss58_address, netuid=self.config.netuid ): raise Exception( f"Wallet not currently registered on netuid {self.config.netuid}, please first register wallet before running" ) bt.logging.debug(f"wallet: {str(self.wallet)}") # Init metagraph. bt.logging.debug("loading metagraph") self.metagraph = bt.metagraph( netuid=self.config.netuid, network=self.subtensor.network, sync=False ) # Make sure not to sync without passing subtensor self.metagraph.sync(subtensor=self.subtensor) # Sync metagraph with subtensor. 
bt.logging.debug(str(self.metagraph)) # Setup database self.database = aioredis.StrictRedis( host=self.config.database.host, port=self.config.database.port, db=self.config.database.index, socket_keepalive=True, socket_connect_timeout=300, ) self.my_subnet_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address ) bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") # Init wandb. if not self.config.wandb.off: bt.logging.debug("loading wandb") init_wandb(self) # The axon handles request processing, allowing validators to send this process requests. self.axon = bt.axon(wallet=self.wallet, config=self.config) bt.logging.info(f"Axon {self.axon}") # Attach determiners which functions are called when servicing a request. bt.logging.info(f"Attaching forward functions to axon.") self.axon.attach( forward_fn=self.store, blacklist_fn=self.store_blacklist_fn, priority_fn=self.store_priority_fn, ).attach( forward_fn=self.challenge, blacklist_fn=self.challenge_blacklist_fn, priority_fn=self.challenge_priority_fn, ).attach( forward_fn=self.retrieve, blacklist_fn=self.retrieve_blacklist_fn, priority_fn=self.retrieve_priority_fn, ) # Serve passes the axon information to the network + netuid we are hosting on. # This will auto-update if the axon port of external ip have changed. bt.logging.info( f"Serving axon {self.axon} on network: {self.subtensor.chain_endpoint} with netuid: {self.config.netuid}" ) self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) # Start starts the miner's axon, making it active on the network. bt.logging.info(f"Starting axon server on port: {self.config.axon.port}") self.axon.start() # Init the event loop. self.loop = asyncio.get_event_loop() # Instantiate runners self.should_exit: bool = False self.is_running: bool = False self.thread: threading.Thread = None self.lock = asyncio.Lock() self.request_timestamps: Dict = {} self.step = 0 # Init the miner's storage request tracker self.request_count = 0 self.start_request_count_timer() self.requests_per_hour = [] self.average_requests_per_hour = 0 # Init the miner's storage usage tracker update_storage_stats(self) def start_request_count_timer(self): """ Initializes and starts a timer for tracking the number of requests received by the miner in an hour. This method sets up a one-hour timer that, upon expiration, calls the `reset_request_count` method to log the number of requests received and reset the count for the next hour. The timer is set to run in a separate thread to avoid blocking the main execution. Usage: Should be called during the initialization of the miner to start tracking requests per hour. """ self.request_count_timer = threading.Timer(3600, self.reset_request_count) self.request_count_timer.start() def reset_request_count(self): """ Logs the number of requests received in the last hour and resets the count. This method is automatically called when the one-hour timer set by `start_request_count_timer` expires. It logs the count of requests received in the last hour and then resets the count. Additionally, it restarts the timer for the next hour. Usage: This method is intended to be called automatically by a timer and typically should not be called directly. 
""" bt.logging.info( f"Number of requests received in the last hour: {self.request_count}" ) self.requests_per_hour.append(self.request_count) bt.logging.info(f"Requests per hour: {self.requests_per_hour}") self.average_requests_per_hour = sum(self.requests_per_hour) / len( self.requests_per_hour ) bt.logging.info(f"Average requests per hour: {self.average_requests_per_hour}") self.request_count = 0 self.start_request_count_timer() @property async def total_storage(self): """ Calculates the total size of data stored by the miner. This method fetches all data keys from the Redis database and sums up the size of each data object. It provides an estimate of the total amount of data currently held by the miner. Returns: int: Total size of data (in bytes) stored by the miner. Example: >>> miner.total_storage() 102400 # Example output indicating 102,400 bytes of data stored """ # Fetch all keys from Redis all_keys = await safe_key_search(self.database, "*") # Filter out keys that contain a period (temporary, remove later) filtered_keys = [key for key in all_keys if b"." not in key] # Get the size of each data object and sum them up total_size = sum( [ await get_chunk_metadata(self.database, key).get(b"size", 0) for key in filtered_keys ] ) return total_size def store_blacklist_fn( self, synapse: storage.protocol.Store ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def store_priority_fn(self, synapse: storage.protocol.Store) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. 
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def challenge_blacklist_fn( self, synapse: storage.protocol.Challenge ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def challenge_priority_fn(self, synapse: storage.protocol.Challenge) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. 
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. 
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem
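The full module source above attaches challenge and retrieve handlers whose bodies are not included in this record. On the challenge side, the compute_subsequent_commitment helper from the snippet list is the piece that rolls a stored chunk's commitment from an old validator seed to a new one. A minimal usage sketch, with made-up data and seeds:

```python
# Hedged sketch based on the compute_subsequent_commitment snippet above;
# data and seeds are illustrative values, not anything from the repository.
from storage.miner.utils import compute_subsequent_commitment

data = b"previously stored chunk bytes"
old_seed, new_seed = b"validator-seed-1", b"validator-seed-2"

new_commitment, proof_of_old = compute_subsequent_commitment(data, old_seed, new_seed)
# proof_of_old is hash_data(data + old_seed); new_commitment chains that proof with new_seed.
```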
filepath = save_data_to_filesystem(
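The single-line continuation target shown just above picks up exactly where the truncated store() method stops: the else-branch that writes a new chunk to disk. Based only on the save_data_to_filesystem and store_chunk_metadata signatures in the snippet list, a plausible reconstruction is sketched below; the filename, size argument, and log line are assumptions, and the fragment only makes sense inside the async store() method, so it is not runnable on its own.

```python
# Hedged reconstruction; the actual repository code may differ.
filepath = save_data_to_filesystem(
    encrypted_byte_data,                # raw bytes decoded earlier in store()
    self.config.database.directory,     # --database.directory, default "~/.data"
    str(data_hash),                     # docstring: "a hash of the data as the filename" (str() form assumed)
)
bt.logging.trace(f"stored {data_hash} at {filepath}")
await store_chunk_metadata(
    self.database,
    data_hash,
    filepath,
    synapse.dendrite.hotkey,
    len(encrypted_byte_data),           # assumed size metric
    synapse.seed,
)
```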
13
2023-10-26 18:54:47+00:00
24k
cpacker/MemGPT
memgpt/main.py
[ { "identifier": "logger", "path": "memgpt/log.py", "snippet": "" }, { "identifier": "CLIInterface", "path": "memgpt/interface.py", "snippet": "class CLIInterface(AgentInterface):\r\n \"\"\"Basic interface for dumping agent events to the command-line\"\"\"\r\n\r\n @staticmethod\r\n def important_message(msg):\r\n fstr = f\"{Fore.MAGENTA}{Style.BRIGHT}{{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def warning_message(msg):\r\n fstr = f\"{Fore.RED}{Style.BRIGHT}{{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n else:\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def internal_monologue(msg):\r\n # ANSI escape code for italic is '\\x1B[3m'\r\n fstr = f\"\\x1B[3m{Fore.LIGHTBLACK_EX}💭 {{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def assistant_message(msg):\r\n fstr = f\"{Fore.YELLOW}{Style.BRIGHT}🤖 {Fore.YELLOW}{{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def memory_message(msg):\r\n fstr = f\"{Fore.LIGHTMAGENTA_EX}{Style.BRIGHT}🧠 {Fore.LIGHTMAGENTA_EX}{{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def system_message(msg):\r\n fstr = f\"{Fore.MAGENTA}{Style.BRIGHT}🖥️ [system] {Fore.MAGENTA}{msg}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def user_message(msg, raw=False, dump=False, debug=DEBUG):\r\n def print_user_message(icon, msg, printf=print):\r\n if STRIP_UI:\r\n printf(f\"{icon} {msg}\")\r\n else:\r\n printf(f\"{Fore.GREEN}{Style.BRIGHT}{icon} {Fore.GREEN}{msg}{Style.RESET_ALL}\")\r\n\r\n def printd_user_message(icon, msg):\r\n return print_user_message(icon, msg)\r\n\r\n if not (raw or dump or debug):\r\n # we do not want to repeat the message in normal use\r\n return\r\n\r\n if isinstance(msg, str):\r\n if raw:\r\n printd_user_message(\"🧑\", msg)\r\n return\r\n else:\r\n try:\r\n msg_json = json.loads(msg)\r\n except:\r\n printd(f\"{CLI_WARNING_PREFIX}failed to parse user message into json\")\r\n printd_user_message(\"🧑\", msg)\r\n return\r\n if msg_json[\"type\"] == \"user_message\":\r\n if dump:\r\n print_user_message(\"🧑\", msg_json[\"message\"])\r\n return\r\n msg_json.pop(\"type\")\r\n printd_user_message(\"🧑\", msg_json)\r\n elif msg_json[\"type\"] == \"heartbeat\":\r\n if debug:\r\n msg_json.pop(\"type\")\r\n printd_user_message(\"💓\", msg_json)\r\n elif dump:\r\n print_user_message(\"💓\", msg_json)\r\n return\r\n\r\n elif msg_json[\"type\"] == \"system_message\":\r\n msg_json.pop(\"type\")\r\n printd_user_message(\"🖥️\", msg_json)\r\n else:\r\n printd_user_message(\"🧑\", msg_json)\r\n\r\n @staticmethod\r\n def function_message(msg, debug=DEBUG):\r\n def print_function_message(icon, msg, color=Fore.RED, printf=print):\r\n if STRIP_UI:\r\n printf(f\"⚡{icon} [function] {msg}\")\r\n else:\r\n printf(f\"{color}{Style.BRIGHT}⚡{icon} [function] {color}{msg}{Style.RESET_ALL}\")\r\n\r\n def printd_function_message(icon, msg, color=Fore.RED):\r\n return print_function_message(icon, msg, color, printf=(print if debug else printd))\r\n\r\n if isinstance(msg, dict):\r\n printd_function_message(\"\", msg)\r\n return\r\n\r\n if msg.startswith(\"Success\"):\r\n printd_function_message(\"🟢\", msg)\r\n elif msg.startswith(\"Error: \"):\r\n printd_function_message(\"🔴\", msg)\r\n elif 
msg.startswith(\"Running \"):\r\n if debug:\r\n printd_function_message(\"\", msg)\r\n else:\r\n match = re.search(r\"Running (\\w+)\\((.*)\\)\", msg)\r\n if match:\r\n function_name = match.group(1)\r\n function_args = match.group(2)\r\n if function_name in [\"archival_memory_insert\", \"archival_memory_search\", \"core_memory_replace\", \"core_memory_append\"]:\r\n if function_name in [\"archival_memory_insert\", \"core_memory_append\", \"core_memory_replace\"]:\r\n print_function_message(\"🧠\", f\"updating memory with {function_name}\")\r\n elif function_name == \"archival_memory_search\":\r\n print_function_message(\"🧠\", f\"searching memory with {function_name}\")\r\n try:\r\n msg_dict = eval(function_args)\r\n if function_name == \"archival_memory_search\":\r\n output = f'\\tquery: {msg_dict[\"query\"]}, page: {msg_dict[\"page\"]}'\r\n if STRIP_UI:\r\n print(output)\r\n else:\r\n print(f\"{Fore.RED}{output}{Style.RESET_ALL}\")\r\n elif function_name == \"archival_memory_insert\":\r\n output = f'\\t→ {msg_dict[\"content\"]}'\r\n if STRIP_UI:\r\n print(output)\r\n else:\r\n print(f\"{Style.BRIGHT}{Fore.RED}{output}{Style.RESET_ALL}\")\r\n else:\r\n if STRIP_UI:\r\n print(f'\\t {msg_dict[\"old_content\"]}\\n\\t→ {msg_dict[\"new_content\"]}')\r\n else:\r\n print(\r\n f'{Style.BRIGHT}\\t{Fore.RED} {msg_dict[\"old_content\"]}\\n\\t{Fore.GREEN}→ {msg_dict[\"new_content\"]}{Style.RESET_ALL}'\r\n )\r\n except Exception as e:\r\n printd(str(e))\r\n printd(msg_dict)\r\n pass\r\n elif function_name in [\"conversation_search\", \"conversation_search_date\"]:\r\n print_function_message(\"🧠\", f\"searching memory with {function_name}\")\r\n try:\r\n msg_dict = eval(function_args)\r\n output = f'\\tquery: {msg_dict[\"query\"]}, page: {msg_dict[\"page\"]}'\r\n if STRIP_UI:\r\n print(output)\r\n else:\r\n print(f\"{Fore.RED}{output}{Style.RESET_ALL}\")\r\n except Exception as e:\r\n printd(str(e))\r\n printd(msg_dict)\r\n pass\r\n else:\r\n printd(f\"{CLI_WARNING_PREFIX}did not recognize function message\")\r\n printd_function_message(\"\", msg)\r\n else:\r\n try:\r\n msg_dict = json.loads(msg)\r\n if \"status\" in msg_dict and msg_dict[\"status\"] == \"OK\":\r\n printd_function_message(\"\", str(msg), color=Fore.GREEN)\r\n else:\r\n printd_function_message(\"\", str(msg), color=Fore.RED)\r\n except Exception:\r\n print(f\"{CLI_WARNING_PREFIX}did not recognize function message {type(msg)} {msg}\")\r\n printd_function_message(\"\", msg)\r\n\r\n @staticmethod\r\n def print_messages(message_sequence, dump=False):\r\n idx = len(message_sequence)\r\n for msg in message_sequence:\r\n if dump:\r\n print(f\"[{idx}] \", end=\"\")\r\n idx -= 1\r\n role = msg[\"role\"]\r\n content = msg[\"content\"]\r\n\r\n if role == \"system\":\r\n CLIInterface.system_message(content)\r\n elif role == \"assistant\":\r\n # Differentiate between internal monologue, function calls, and messages\r\n if msg.get(\"function_call\"):\r\n if content is not None:\r\n CLIInterface.internal_monologue(content)\r\n # I think the next one is not up to date\r\n # function_message(msg[\"function_call\"])\r\n args = json.loads(msg[\"function_call\"].get(\"arguments\"))\r\n CLIInterface.assistant_message(args.get(\"message\"))\r\n # assistant_message(content)\r\n else:\r\n CLIInterface.internal_monologue(content)\r\n elif role == \"user\":\r\n CLIInterface.user_message(content, dump=dump)\r\n elif role == \"function\":\r\n CLIInterface.function_message(content, debug=dump)\r\n else:\r\n print(f\"Unknown role: {content}\")\r\n\r\n 
@staticmethod\r\n def print_messages_simple(message_sequence):\r\n for msg in message_sequence:\r\n role = msg[\"role\"]\r\n content = msg[\"content\"]\r\n\r\n if role == \"system\":\r\n CLIInterface.system_message(content)\r\n elif role == \"assistant\":\r\n CLIInterface.assistant_message(content)\r\n elif role == \"user\":\r\n CLIInterface.user_message(content, raw=True)\r\n else:\r\n print(f\"Unknown role: {content}\")\r\n\r\n @staticmethod\r\n def print_messages_raw(message_sequence):\r\n for msg in message_sequence:\r\n print(msg)\r\n\r\n @staticmethod\r\n def step_yield():\r\n pass\r" }, { "identifier": "MemGPTConfig", "path": "memgpt/config.py", "snippet": "class MemGPTConfig:\n config_path: str = os.path.join(MEMGPT_DIR, \"config\")\n anon_clientid: str = None\n\n # preset\n preset: str = DEFAULT_PRESET\n\n # persona parameters\n persona: str = DEFAULT_PERSONA\n human: str = DEFAULT_HUMAN\n agent: str = None\n\n # model parameters\n default_llm_config: LLMConfig = field(default_factory=LLMConfig)\n\n # embedding parameters\n default_embedding_config: EmbeddingConfig = field(default_factory=EmbeddingConfig)\n\n # database configs: archival\n archival_storage_type: str = \"chroma\" # local, db\n archival_storage_path: str = os.path.join(MEMGPT_DIR, \"chroma\")\n archival_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: recall\n recall_storage_type: str = \"sqlite\" # local, db\n recall_storage_path: str = MEMGPT_DIR\n recall_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: metadata storage (sources, agents, data sources)\n metadata_storage_type: str = \"sqlite\"\n metadata_storage_path: str = MEMGPT_DIR\n metadata_storage_uri: str = None\n\n # database configs: agent state\n persistence_manager_type: str = None # in-memory, db\n persistence_manager_save_file: str = None # local file\n persistence_manager_uri: str = None # db URI\n\n # version (for backcompat)\n memgpt_version: str = None\n\n # user info\n policies_accepted: bool = False\n\n def __post_init__(self):\n # ensure types\n # self.embedding_chunk_size = int(self.embedding_chunk_size)\n # self.embedding_dim = int(self.embedding_dim)\n # self.context_window = int(self.context_window)\n pass\n\n @staticmethod\n def generate_uuid() -> str:\n return uuid.UUID(int=uuid.getnode()).hex\n\n @classmethod\n def load(cls) -> \"MemGPTConfig\":\n # avoid circular import\n from memgpt.migrate import config_is_compatible, VERSION_CUTOFF\n\n if not config_is_compatible(allow_empty=True):\n error_message = \" \".join(\n [\n f\"\\nYour current config file is incompatible with MemGPT versions later than {VERSION_CUTOFF}.\",\n f\"\\nTo use MemGPT, you must either downgrade your MemGPT version (<= {VERSION_CUTOFF}) or regenerate your config using `memgpt configure`, or `memgpt migrate` if you would like to migrate old agents.\",\n ]\n )\n raise ValueError(error_message)\n\n config = configparser.ConfigParser()\n\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n # insure all configuration directories exist\n cls.create_config_dir()\n if os.path.exists(config_path):\n # read existing config\n config.read(config_path)\n\n # Handle extraction of nested LLMConfig and EmbeddingConfig\n llm_config_dict = {\n # Extract relevant LLM configuration from the config file\n \"model\": get_field(config, \"model\", \"model\"),\n 
\"model_endpoint\": get_field(config, \"model\", \"model_endpoint\"),\n \"model_endpoint_type\": get_field(config, \"model\", \"model_endpoint_type\"),\n \"model_wrapper\": get_field(config, \"model\", \"model_wrapper\"),\n \"context_window\": get_field(config, \"model\", \"context_window\"),\n }\n embedding_config_dict = {\n # Extract relevant Embedding configuration from the config file\n \"embedding_endpoint\": get_field(config, \"embedding\", \"embedding_endpoint\"),\n \"embedding_model\": get_field(config, \"embedding\", \"embedding_model\"),\n \"embedding_endpoint_type\": get_field(config, \"embedding\", \"embedding_endpoint_type\"),\n \"embedding_dim\": get_field(config, \"embedding\", \"embedding_dim\"),\n \"embedding_chunk_size\": get_field(config, \"embedding\", \"chunk_size\"),\n }\n # Correct the types that aren't strings\n if llm_config_dict[\"context_window\"] is not None:\n llm_config_dict[\"context_window\"] = int(llm_config_dict[\"context_window\"])\n if embedding_config_dict[\"embedding_dim\"] is not None:\n embedding_config_dict[\"embedding_dim\"] = int(embedding_config_dict[\"embedding_dim\"])\n if embedding_config_dict[\"embedding_chunk_size\"] is not None:\n embedding_config_dict[\"embedding_chunk_size\"] = int(embedding_config_dict[\"embedding_chunk_size\"])\n # Construct the inner properties\n llm_config = LLMConfig(**llm_config_dict)\n embedding_config = EmbeddingConfig(**embedding_config_dict)\n\n # Everything else\n config_dict = {\n # Two prepared configs\n \"default_llm_config\": llm_config,\n \"default_embedding_config\": embedding_config,\n # Agent related\n \"preset\": get_field(config, \"defaults\", \"preset\"),\n \"persona\": get_field(config, \"defaults\", \"persona\"),\n \"human\": get_field(config, \"defaults\", \"human\"),\n \"agent\": get_field(config, \"defaults\", \"agent\"),\n # Storage related\n \"archival_storage_type\": get_field(config, \"archival_storage\", \"type\"),\n \"archival_storage_path\": get_field(config, \"archival_storage\", \"path\"),\n \"archival_storage_uri\": get_field(config, \"archival_storage\", \"uri\"),\n \"recall_storage_type\": get_field(config, \"recall_storage\", \"type\"),\n \"recall_storage_path\": get_field(config, \"recall_storage\", \"path\"),\n \"recall_storage_uri\": get_field(config, \"recall_storage\", \"uri\"),\n \"metadata_storage_type\": get_field(config, \"metadata_storage\", \"type\"),\n \"metadata_storage_path\": get_field(config, \"metadata_storage\", \"path\"),\n \"metadata_storage_uri\": get_field(config, \"metadata_storage\", \"uri\"),\n # Misc\n \"anon_clientid\": get_field(config, \"client\", \"anon_clientid\"),\n \"config_path\": config_path,\n \"memgpt_version\": get_field(config, \"version\", \"memgpt_version\"),\n }\n\n # Don't include null values\n config_dict = {k: v for k, v in config_dict.items() if v is not None}\n\n return cls(**config_dict)\n\n # create new config\n anon_clientid = MemGPTConfig.generate_uuid()\n config = cls(anon_clientid=anon_clientid, config_path=config_path)\n config.create_config_dir() # create dirs\n config.save() # save updated config\n\n return config\n\n def save(self):\n import memgpt\n\n config = configparser.ConfigParser()\n\n # CLI defaults\n set_field(config, \"defaults\", \"preset\", self.preset)\n set_field(config, \"defaults\", \"persona\", self.persona)\n set_field(config, \"defaults\", \"human\", self.human)\n set_field(config, \"defaults\", \"agent\", self.agent)\n\n # model defaults\n set_field(config, \"model\", \"model\", 
self.default_llm_config.model)\n set_field(config, \"model\", \"model_endpoint\", self.default_llm_config.model_endpoint)\n set_field(config, \"model\", \"model_endpoint_type\", self.default_llm_config.model_endpoint_type)\n set_field(config, \"model\", \"model_wrapper\", self.default_llm_config.model_wrapper)\n set_field(config, \"model\", \"context_window\", str(self.default_llm_config.context_window))\n\n # embeddings\n set_field(config, \"embedding\", \"embedding_endpoint_type\", self.default_embedding_config.embedding_endpoint_type)\n set_field(config, \"embedding\", \"embedding_endpoint\", self.default_embedding_config.embedding_endpoint)\n set_field(config, \"embedding\", \"embedding_model\", self.default_embedding_config.embedding_model)\n set_field(config, \"embedding\", \"embedding_dim\", str(self.default_embedding_config.embedding_dim))\n set_field(config, \"embedding\", \"embedding_chunk_size\", str(self.default_embedding_config.embedding_chunk_size))\n\n # archival storage\n set_field(config, \"archival_storage\", \"type\", self.archival_storage_type)\n set_field(config, \"archival_storage\", \"path\", self.archival_storage_path)\n set_field(config, \"archival_storage\", \"uri\", self.archival_storage_uri)\n\n # recall storage\n set_field(config, \"recall_storage\", \"type\", self.recall_storage_type)\n set_field(config, \"recall_storage\", \"path\", self.recall_storage_path)\n set_field(config, \"recall_storage\", \"uri\", self.recall_storage_uri)\n\n # metadata storage\n set_field(config, \"metadata_storage\", \"type\", self.metadata_storage_type)\n set_field(config, \"metadata_storage\", \"path\", self.metadata_storage_path)\n set_field(config, \"metadata_storage\", \"uri\", self.metadata_storage_uri)\n\n # set version\n set_field(config, \"version\", \"memgpt_version\", memgpt.__version__)\n\n # client\n if not self.anon_clientid:\n self.anon_clientid = self.generate_uuid()\n set_field(config, \"client\", \"anon_clientid\", self.anon_clientid)\n\n # always make sure all directories are present\n self.create_config_dir()\n\n with open(self.config_path, \"w\") as f:\n config.write(f)\n logger.debug(f\"Saved Config: {self.config_path}\")\n\n @staticmethod\n def exists():\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n assert not os.path.isdir(config_path), f\"Config path {config_path} cannot be set to a directory.\"\n return os.path.exists(config_path)\n\n @staticmethod\n def create_config_dir():\n if not os.path.exists(MEMGPT_DIR):\n os.makedirs(MEMGPT_DIR, exist_ok=True)\n\n folders = [\"personas\", \"humans\", \"archival\", \"agents\", \"functions\", \"system_prompts\", \"presets\", \"settings\"]\n\n for folder in folders:\n if not os.path.exists(os.path.join(MEMGPT_DIR, folder)):\n os.makedirs(os.path.join(MEMGPT_DIR, folder))" }, { "identifier": "run", "path": "memgpt/cli/cli.py", "snippet": "def run(\n persona: str = typer.Option(None, help=\"Specify persona\"),\n agent: str = typer.Option(None, help=\"Specify agent save file\"),\n human: str = typer.Option(None, help=\"Specify human\"),\n preset: str = typer.Option(None, help=\"Specify preset\"),\n # model flags\n model: str = typer.Option(None, help=\"Specify the LLM model\"),\n model_wrapper: str = typer.Option(None, help=\"Specify the LLM model wrapper\"),\n model_endpoint: str = typer.Option(None, help=\"Specify the LLM model endpoint\"),\n model_endpoint_type: str = 
typer.Option(None, help=\"Specify the LLM model endpoint type\"),\n context_window: int = typer.Option(None, help=\"The context window of the LLM you are using (e.g. 8k for most Mistral 7B variants)\"),\n # other\n first: bool = typer.Option(False, \"--first\", help=\"Use --first to send the first message in the sequence\"),\n strip_ui: bool = typer.Option(False, help=\"Remove all the bells and whistles in CLI output (helpful for testing)\"),\n debug: bool = typer.Option(False, \"--debug\", help=\"Use --debug to enable debugging output\"),\n no_verify: bool = typer.Option(False, help=\"Bypass message verification\"),\n yes: bool = typer.Option(False, \"-y\", help=\"Skip confirmation prompt and use defaults\"),\n):\n \"\"\"Start chatting with an MemGPT agent\n\n Example usage: `memgpt run --agent myagent --data-source mydata --persona mypersona --human myhuman --model gpt-3.5-turbo`\n\n :param persona: Specify persona\n :param agent: Specify agent name (will load existing state if the agent exists, or create a new one with that name)\n :param human: Specify human\n :param model: Specify the LLM model\n\n \"\"\"\n\n # setup logger\n # TODO: remove Utils Debug after global logging is complete.\n utils.DEBUG = debug\n # TODO: add logging command line options for runtime log level\n\n if debug:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.CRITICAL)\n\n from memgpt.migrate import config_is_compatible, wipe_config_and_reconfigure, VERSION_CUTOFF\n\n if not config_is_compatible(allow_empty=True):\n typer.secho(f\"\\nYour current config file is incompatible with MemGPT versions later than {VERSION_CUTOFF}\\n\", fg=typer.colors.RED)\n choices = [\n \"Run the full config setup (recommended)\",\n \"Create a new config using defaults\",\n \"Cancel\",\n ]\n selection = questionary.select(\n f\"To use MemGPT, you must either downgrade your MemGPT version (<= {VERSION_CUTOFF}), or regenerate your config. 
Would you like to proceed?\",\n choices=choices,\n default=choices[0],\n ).ask()\n if selection == choices[0]:\n try:\n wipe_config_and_reconfigure()\n except Exception as e:\n typer.secho(f\"Fresh config generation failed - error:\\n{e}\", fg=typer.colors.RED)\n raise\n elif selection == choices[1]:\n try:\n wipe_config_and_reconfigure(run_configure=False)\n except Exception as e:\n typer.secho(f\"Fresh config generation failed - error:\\n{e}\", fg=typer.colors.RED)\n raise\n else:\n typer.secho(\"Migration cancelled (to migrate old agents, run `memgpt migrate`)\", fg=typer.colors.RED)\n raise KeyboardInterrupt()\n\n if not MemGPTConfig.exists():\n # if no config, ask about quickstart\n # do you want to do:\n # - openai (run quickstart)\n # - memgpt hosted (run quickstart)\n # - other (run configure)\n if yes:\n # if user is passing '-y' to bypass all inputs, use memgpt hosted\n # since it can't fail out if you don't have an API key\n quickstart(backend=QuickstartChoice.memgpt_hosted)\n config = MemGPTConfig()\n\n else:\n config_choices = {\n \"memgpt\": \"Use the free MemGPT endpoints\",\n \"openai\": \"Use OpenAI (requires an OpenAI API key)\",\n \"other\": \"Other (OpenAI Azure, custom LLM endpoint, etc)\",\n }\n print()\n config_selection = questionary.select(\n \"How would you like to set up MemGPT?\",\n choices=list(config_choices.values()),\n default=config_choices[\"memgpt\"],\n ).ask()\n\n if config_selection == config_choices[\"memgpt\"]:\n print()\n quickstart(backend=QuickstartChoice.memgpt_hosted, debug=debug, terminal=False, latest=False)\n elif config_selection == config_choices[\"openai\"]:\n print()\n quickstart(backend=QuickstartChoice.openai, debug=debug, terminal=False, latest=False)\n elif config_selection == config_choices[\"other\"]:\n configure()\n else:\n raise ValueError(config_selection)\n\n config = MemGPTConfig.load()\n\n else: # load config\n config = MemGPTConfig.load()\n\n # force re-configuration is config is from old version\n if config.memgpt_version is None: # TODO: eventually add checks for older versions, if config changes again\n typer.secho(\"MemGPT has been updated to a newer version, so re-running configuration.\", fg=typer.colors.YELLOW)\n configure()\n config = MemGPTConfig.load()\n\n # read user id from config\n ms = MetadataStore(config)\n user_id = uuid.UUID(config.anon_clientid)\n user = ms.get_user(user_id=user_id)\n if user is None:\n ms.create_user(User(id=user_id))\n user = ms.get_user(user_id=user_id)\n if user is None:\n typer.secho(f\"Failed to create default user in database.\", fg=typer.colors.RED)\n sys.exit(1)\n\n # override with command line arguments\n if debug:\n config.debug = debug\n if no_verify:\n config.no_verify = no_verify\n # determine agent to use, if not provided\n if not yes and not agent:\n agents = ms.list_agents(user_id=user.id)\n agents = [a.name for a in agents]\n\n if len(agents) > 0 and not any([persona, human, model]):\n print()\n select_agent = questionary.confirm(\"Would you like to select an existing agent?\").ask()\n if select_agent is None:\n raise KeyboardInterrupt\n if select_agent:\n agent = questionary.select(\"Select agent:\", choices=agents).ask()\n\n # create agent config\n if agent and ms.get_agent(agent_name=agent, user_id=user.id): # use existing agent\n typer.secho(f\"\\n🔁 Using existing agent {agent}\", fg=typer.colors.GREEN)\n # agent_config = AgentConfig.load(agent)\n agent_state = ms.get_agent(agent_name=agent, user_id=user_id)\n printd(\"Loading agent state:\", agent_state.id)\n 
printd(\"Agent state:\", agent_state.state)\n # printd(\"State path:\", agent_config.save_state_dir())\n # printd(\"Persistent manager path:\", agent_config.save_persistence_manager_dir())\n # printd(\"Index path:\", agent_config.save_agent_index_dir())\n # persistence_manager = LocalStateManager(agent_config).load() # TODO: implement load\n # TODO: load prior agent state\n if persona and persona != agent_state.persona:\n typer.secho(f\"{CLI_WARNING_PREFIX}Overriding existing persona {agent_state.persona} with {persona}\", fg=typer.colors.YELLOW)\n agent_state.persona = persona\n # raise ValueError(f\"Cannot override {agent_state.name} existing persona {agent_state.persona} with {persona}\")\n if human and human != agent_state.human:\n typer.secho(f\"{CLI_WARNING_PREFIX}Overriding existing human {agent_state.human} with {human}\", fg=typer.colors.YELLOW)\n agent_state.human = human\n # raise ValueError(f\"Cannot override {agent_config.name} existing human {agent_config.human} with {human}\")\n\n # Allow overriding model specifics (model, model wrapper, model endpoint IP + type, context_window)\n if model and model != agent_state.llm_config.model:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model {agent_state.llm_config.model} with {model}\", fg=typer.colors.YELLOW\n )\n agent_state.llm_config.model = model\n if context_window is not None and int(context_window) != agent_state.llm_config.context_window:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing context window {agent_state.llm_config.context_window} with {context_window}\",\n fg=typer.colors.YELLOW,\n )\n agent_state.llm_config.context_window = context_window\n if model_wrapper and model_wrapper != agent_state.llm_config.model_wrapper:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model wrapper {agent_state.llm_config.model_wrapper} with {model_wrapper}\",\n fg=typer.colors.YELLOW,\n )\n agent_state.llm_config.model_wrapper = model_wrapper\n if model_endpoint and model_endpoint != agent_state.llm_config.model_endpoint:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model endpoint {agent_state.llm_config.model_endpoint} with {model_endpoint}\",\n fg=typer.colors.YELLOW,\n )\n agent_state.llm_config.model_endpoint = model_endpoint\n if model_endpoint_type and model_endpoint_type != agent_state.llm_config.model_endpoint_type:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model endpoint type {agent_state.llm_config.model_endpoint_type} with {model_endpoint_type}\",\n fg=typer.colors.YELLOW,\n )\n agent_state.llm_config.model_endpoint_type = model_endpoint_type\n\n # Update the agent with any overrides\n ms.update_agent(agent_state)\n\n # create agent\n memgpt_agent = Agent(agent_state, interface=interface)\n\n else: # create new agent\n # create new agent config: override defaults with args if provided\n typer.secho(\"\\n🧬 Creating new agent...\", fg=typer.colors.WHITE)\n\n if agent is None:\n # determine agent name\n # agent_count = len(ms.list_agents(user_id=user.id))\n # agent = f\"agent_{agent_count}\"\n agent = utils.create_random_username()\n\n llm_config = config.default_llm_config\n embedding_config = config.default_embedding_config # TODO allow overriding embedding params via CLI run\n\n # Allow overriding model specifics (model, model wrapper, model endpoint IP + type, context_window)\n if model and model != llm_config.model:\n typer.secho(f\"{CLI_WARNING_PREFIX}Overriding default model {llm_config.model} with {model}\", fg=typer.colors.YELLOW)\n 
llm_config.model = model\n if context_window is not None and int(context_window) != llm_config.context_window:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding default context window {llm_config.context_window} with {context_window}\",\n fg=typer.colors.YELLOW,\n )\n llm_config.context_window = context_window\n if model_wrapper and model_wrapper != llm_config.model_wrapper:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model wrapper {llm_config.model_wrapper} with {model_wrapper}\",\n fg=typer.colors.YELLOW,\n )\n llm_config.model_wrapper = model_wrapper\n if model_endpoint and model_endpoint != llm_config.model_endpoint:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model endpoint {llm_config.model_endpoint} with {model_endpoint}\",\n fg=typer.colors.YELLOW,\n )\n llm_config.model_endpoint = model_endpoint\n if model_endpoint_type and model_endpoint_type != llm_config.model_endpoint_type:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model endpoint type {llm_config.model_endpoint_type} with {model_endpoint_type}\",\n fg=typer.colors.YELLOW,\n )\n llm_config.model_endpoint_type = model_endpoint_type\n\n agent_state = AgentState(\n name=agent,\n user_id=user.id,\n persona=persona if persona else user.default_persona,\n human=human if human else user.default_human,\n preset=preset if preset else user.default_preset,\n llm_config=llm_config,\n embedding_config=embedding_config,\n )\n ms.create_agent(agent_state)\n\n typer.secho(f\"-> 🤖 Using persona profile '{agent_state.persona}'\", fg=typer.colors.WHITE)\n typer.secho(f\"-> 🧑 Using human profile '{agent_state.human}'\", fg=typer.colors.WHITE)\n\n # Supress llama-index noise\n # TODO(swooders) add persistence manager code? or comment out?\n # with suppress_stdout():\n # TODO: allow configrable state manager (only local is supported right now)\n # persistence_manager = LocalStateManager(agent_config) # TODO: insert dataset/pre-fill\n\n # create agent\n try:\n memgpt_agent = presets.create_agent_from_preset(\n agent_state=agent_state,\n interface=interface,\n )\n except ValueError as e:\n # TODO(swooders) what's the equivalent cleanup code for the new DB refactor?\n typer.secho(f\"Failed to create agent from provided information:\\n{e}\", fg=typer.colors.RED)\n # # Delete the directory of the failed agent\n # try:\n # # Path to the specific file\n # agent_config_file = agent_config.agent_config_path\n\n # # Check if the file exists\n # if os.path.isfile(agent_config_file):\n # # Delete the file\n # os.remove(agent_config_file)\n\n # # Now, delete the directory along with any remaining files in it\n # agent_save_dir = os.path.join(MEMGPT_DIR, \"agents\", agent_config.name)\n # shutil.rmtree(agent_save_dir)\n # except:\n # typer.secho(f\"Failed to delete agent directory during cleanup:\\n{e}\", fg=typer.colors.RED)\n sys.exit(1)\n typer.secho(f\"🎉 Created new agent '{agent_state.name}'\", fg=typer.colors.GREEN)\n\n # pretty print agent config\n # printd(json.dumps(vars(agent_config), indent=4, sort_keys=True, ensure_ascii=JSON_ENSURE_ASCII))\n # printd(json.dumps(agent_init_state), indent=4, sort_keys=True, ensure_ascii=JSON_ENSURE_ASCII))\n\n # configure llama index\n original_stdout = sys.stdout # unfortunate hack required to suppress confusing print statements from llama index\n sys.stdout = io.StringIO()\n embed_model = embedding_model(config=agent_state.embedding_config, user_id=user.id)\n service_context = ServiceContext.from_defaults(\n llm=None, embed_model=embed_model, 
chunk_size=agent_state.embedding_config.embedding_chunk_size\n )\n set_global_service_context(service_context)\n sys.stdout = original_stdout\n\n # start event loop\n from memgpt.main import run_agent_loop\n\n print() # extra space\n run_agent_loop(memgpt_agent, config, first, ms, no_verify) # TODO: add back no_verify" }, { "identifier": "attach", "path": "memgpt/cli/cli.py", "snippet": "def attach(\n agent: str = typer.Option(help=\"Specify agent to attach data to\"),\n data_source: str = typer.Option(help=\"Data source to attach to avent\"),\n user_id: uuid.UUID = None,\n):\n # use client ID is no user_id provided\n config = MemGPTConfig.load()\n if user_id is None:\n user_id = uuid.UUID(config.anon_clientid)\n try:\n # loads the data contained in data source into the agent's memory\n from memgpt.agent_store.storage import StorageConnector, TableType\n from tqdm import tqdm\n\n ms = MetadataStore(config)\n agent = ms.get_agent(agent_name=agent, user_id=user_id)\n source = ms.get_source(source_name=data_source, user_id=user_id)\n assert source is not None, f\"Source {data_source} does not exist for user {user_id}\"\n\n # get storage connectors\n with suppress_stdout():\n source_storage = StorageConnector.get_storage_connector(TableType.PASSAGES, config, user_id=user_id)\n dest_storage = StorageConnector.get_storage_connector(TableType.ARCHIVAL_MEMORY, config, user_id=user_id, agent_id=agent.id)\n\n size = source_storage.size({\"data_source\": data_source})\n typer.secho(f\"Ingesting {size} passages into {agent.name}\", fg=typer.colors.GREEN)\n page_size = 100\n generator = source_storage.get_all_paginated(filters={\"data_source\": data_source}, page_size=page_size) # yields List[Passage]\n passages = []\n for i in tqdm(range(0, size, page_size)):\n passages = next(generator)\n print(\"inserting\", passages)\n\n # need to associated passage with agent (for filtering)\n for passage in passages:\n passage.agent_id = agent.id\n\n # insert into agent archival memory\n dest_storage.insert_many(passages)\n\n # save destination storage\n dest_storage.save()\n\n # attach to agent\n source_id = ms.get_source(source_name=data_source, user_id=user_id).id\n ms.attach_source(agent_id=agent.id, source_id=source_id, user_id=user_id)\n\n total_agent_passages = dest_storage.size()\n\n typer.secho(\n f\"Attached data source {data_source} to agent {agent}, consisting of {len(passages)}. 
Agent now has {total_agent_passages} embeddings in archival memory.\",\n fg=typer.colors.GREEN,\n )\n except KeyboardInterrupt:\n typer.secho(\"Operation interrupted by KeyboardInterrupt.\", fg=typer.colors.YELLOW)" }, { "identifier": "version", "path": "memgpt/cli/cli.py", "snippet": "def version():\n import memgpt\n\n print(memgpt.__version__)\n return memgpt.__version__" }, { "identifier": "server", "path": "memgpt/cli/cli.py", "snippet": "def server(\n type: ServerChoice = typer.Option(\"rest\", help=\"Server to run\"),\n port: int = typer.Option(None, help=\"Port to run the server on\"),\n host: str = typer.Option(None, help=\"Host to run the server on (default to localhost)\"),\n debug: bool = typer.Option(True, help=\"Turn debugging output on\"),\n):\n \"\"\"Launch a MemGPT server process\"\"\"\n\n if debug:\n from memgpt.server.server import logger as server_logger\n\n # Set the logging level\n server_logger.setLevel(logging.DEBUG)\n # Create a StreamHandler\n stream_handler = logging.StreamHandler()\n # Set the formatter (optional)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n stream_handler.setFormatter(formatter)\n # Add the handler to the logger\n server_logger.addHandler(stream_handler)\n\n if type == ServerChoice.rest_api:\n import uvicorn\n from memgpt.server.rest_api.server import app\n\n try:\n # Start the subprocess in a new session\n uvicorn.run(app, host=host or \"localhost\", port=port or REST_DEFAULT_PORT)\n\n except KeyboardInterrupt:\n # Handle CTRL-C\n print(\"Terminating the server...\")\n sys.exit(0)\n\n elif type == ServerChoice.ws_api:\n if port is None:\n port = WS_DEFAULT_PORT\n\n # Change to the desired directory\n script_path = Path(__file__).resolve()\n script_dir = script_path.parent\n\n server_directory = os.path.join(script_dir.parent, \"server\", \"ws_api\")\n command = f\"python server.py {port}\"\n\n # Run the command\n print(f\"Running WS (websockets) server: {command} (inside {server_directory})\")\n\n try:\n # Start the subprocess in a new session\n process = subprocess.Popen(command, shell=True, start_new_session=True, cwd=server_directory)\n process.wait()\n except KeyboardInterrupt:\n # Handle CTRL-C\n print(\"Terminating the server...\")\n process.terminate()\n try:\n process.wait(timeout=5)\n except subprocess.TimeoutExpired:\n process.kill()\n print(\"Server terminated with kill()\")\n sys.exit(0)" }, { "identifier": "open_folder", "path": "memgpt/cli/cli.py", "snippet": "def open_folder():\n \"\"\"Open a folder viewer of the MemGPT home directory\"\"\"\n try:\n print(f\"Opening home folder: {MEMGPT_DIR}\")\n open_folder_in_explorer(MEMGPT_DIR)\n except Exception as e:\n print(f\"Failed to open folder with system viewer, error:\\n{e}\")" }, { "identifier": "quickstart", "path": "memgpt/cli/cli.py", "snippet": "def quickstart(\n backend: QuickstartChoice = typer.Option(\"memgpt\", help=\"Quickstart setup backend\"),\n latest: bool = typer.Option(False, \"--latest\", help=\"Use --latest to pull the latest config from online\"),\n debug: bool = typer.Option(False, \"--debug\", help=\"Use --debug to enable debugging output\"),\n terminal: bool = True,\n):\n \"\"\"Set the base config file with a single command\"\"\"\n\n # setup logger\n utils.DEBUG = debug\n logging.getLogger().setLevel(logging.CRITICAL)\n if debug:\n logging.getLogger().setLevel(logging.DEBUG)\n\n # make sure everything is set up properly\n MemGPTConfig.create_config_dir()\n credentials = MemGPTCredentials.load()\n\n 
config_was_modified = False\n if backend == QuickstartChoice.memgpt_hosted:\n # if latest, try to pull the config from the repo\n # fallback to using local\n if latest:\n # Download the latest memgpt hosted config\n url = \"https://raw.githubusercontent.com/cpacker/MemGPT/main/memgpt/configs/memgpt_hosted.json\"\n response = requests.get(url)\n\n # Check if the request was successful\n if response.status_code == 200:\n # Parse the response content as JSON\n config = response.json()\n # Output a success message and the first few items in the dictionary as a sample\n printd(\"JSON config file downloaded successfully.\")\n config_was_modified = set_config_with_dict(config)\n else:\n typer.secho(f\"Failed to download config from {url}. Status code: {response.status_code}\", fg=typer.colors.RED)\n\n # Load the file from the relative path\n script_dir = os.path.dirname(__file__) # Get the directory where the script is located\n backup_config_path = os.path.join(script_dir, \"..\", \"configs\", \"memgpt_hosted.json\")\n try:\n with open(backup_config_path, \"r\") as file:\n backup_config = json.load(file)\n printd(\"Loaded backup config file successfully.\")\n config_was_modified = set_config_with_dict(backup_config)\n except FileNotFoundError:\n typer.secho(f\"Backup config file not found at {backup_config_path}\", fg=typer.colors.RED)\n return\n else:\n # Load the file from the relative path\n script_dir = os.path.dirname(__file__) # Get the directory where the script is located\n backup_config_path = os.path.join(script_dir, \"..\", \"configs\", \"memgpt_hosted.json\")\n try:\n with open(backup_config_path, \"r\") as file:\n backup_config = json.load(file)\n printd(\"Loaded config file successfully.\")\n config_was_modified = set_config_with_dict(backup_config)\n except FileNotFoundError:\n typer.secho(f\"Config file not found at {backup_config_path}\", fg=typer.colors.RED)\n return\n\n elif backend == QuickstartChoice.openai:\n # Make sure we have an API key\n api_key = os.getenv(\"OPENAI_API_KEY\")\n while api_key is None or len(api_key) == 0:\n # Ask for API key as input\n api_key = questionary.password(\"Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):\").ask()\n credentials.openai_key = api_key\n credentials.save()\n\n # if latest, try to pull the config from the repo\n # fallback to using local\n if latest:\n url = \"https://raw.githubusercontent.com/cpacker/MemGPT/main/memgpt/configs/openai.json\"\n response = requests.get(url)\n\n # Check if the request was successful\n if response.status_code == 200:\n # Parse the response content as JSON\n config = response.json()\n # Output a success message and the first few items in the dictionary as a sample\n print(\"JSON config file downloaded successfully.\")\n config_was_modified = set_config_with_dict(config)\n else:\n typer.secho(f\"Failed to download config from {url}. 
Status code: {response.status_code}\", fg=typer.colors.RED)\n\n # Load the file from the relative path\n script_dir = os.path.dirname(__file__) # Get the directory where the script is located\n backup_config_path = os.path.join(script_dir, \"..\", \"configs\", \"openai.json\")\n try:\n with open(backup_config_path, \"r\") as file:\n backup_config = json.load(file)\n printd(\"Loaded backup config file successfully.\")\n config_was_modified = set_config_with_dict(backup_config)\n except FileNotFoundError:\n typer.secho(f\"Backup config file not found at {backup_config_path}\", fg=typer.colors.RED)\n return\n else:\n # Load the file from the relative path\n script_dir = os.path.dirname(__file__) # Get the directory where the script is located\n backup_config_path = os.path.join(script_dir, \"..\", \"configs\", \"openai.json\")\n try:\n with open(backup_config_path, \"r\") as file:\n backup_config = json.load(file)\n printd(\"Loaded config file successfully.\")\n config_was_modified = set_config_with_dict(backup_config)\n except FileNotFoundError:\n typer.secho(f\"Config file not found at {backup_config_path}\", fg=typer.colors.RED)\n return\n\n else:\n raise NotImplementedError(backend)\n\n # 'terminal' = quickstart was run alone, in which case we should guide the user on the next command\n if terminal:\n if config_was_modified:\n typer.secho('⚡ Run \"memgpt run\" to create an agent with the new config.', fg=typer.colors.YELLOW)\n else:\n typer.secho('⚡ Run \"memgpt run\" to create an agent.', fg=typer.colors.YELLOW)" }, { "identifier": "migrate", "path": "memgpt/cli/cli.py", "snippet": "def migrate():\n \"\"\"Migrate old agents (pre 0.2.12) to the new database system\"\"\"\n migrate_all_agents()\n migrate_all_sources()" }, { "identifier": "configure", "path": "memgpt/cli/cli_config.py", "snippet": "@app.command()\ndef configure():\n \"\"\"Updates default MemGPT configurations\"\"\"\n\n # check credentials\n credentials = MemGPTCredentials.load()\n openai_key = get_openai_credentials()\n azure_creds = get_azure_credentials()\n\n MemGPTConfig.create_config_dir()\n\n # Will pre-populate with defaults, or what the user previously set\n config = MemGPTConfig.load()\n try:\n model_endpoint_type, model_endpoint = configure_llm_endpoint(\n config=config,\n credentials=credentials,\n )\n model, model_wrapper, context_window = configure_model(\n config=config,\n credentials=credentials,\n model_endpoint_type=model_endpoint_type,\n model_endpoint=model_endpoint,\n )\n embedding_endpoint_type, embedding_endpoint, embedding_dim, embedding_model = configure_embedding_endpoint(\n config=config,\n credentials=credentials,\n )\n default_preset, default_persona, default_human, default_agent = configure_cli(\n config=config,\n credentials=credentials,\n )\n archival_storage_type, archival_storage_uri, archival_storage_path = configure_archival_storage(\n config=config,\n credentials=credentials,\n )\n recall_storage_type, recall_storage_uri, recall_storage_path = configure_recall_storage(\n config=config,\n credentials=credentials,\n )\n except ValueError as e:\n typer.secho(str(e), fg=typer.colors.RED)\n return\n\n # openai key might have gotten added along the way\n openai_key = credentials.openai_key if credentials.openai_key is not None else openai_key\n\n # TODO: remove most of this (deplicated with User table)\n config = MemGPTConfig(\n default_llm_config=LLMConfig(\n model=model,\n model_endpoint=model_endpoint,\n model_endpoint_type=model_endpoint_type,\n model_wrapper=model_wrapper,\n 
context_window=context_window,\n ),\n default_embedding_config=EmbeddingConfig(\n embedding_endpoint_type=embedding_endpoint_type,\n embedding_endpoint=embedding_endpoint,\n embedding_dim=embedding_dim,\n embedding_model=embedding_model,\n ),\n # cli configs\n preset=default_preset,\n persona=default_persona,\n human=default_human,\n agent=default_agent,\n # storage\n archival_storage_type=archival_storage_type,\n archival_storage_uri=archival_storage_uri,\n archival_storage_path=archival_storage_path,\n # recall storage\n recall_storage_type=recall_storage_type,\n recall_storage_uri=recall_storage_uri,\n recall_storage_path=recall_storage_path,\n # metadata storage (currently forced to match recall storage)\n metadata_storage_type=recall_storage_type,\n metadata_storage_uri=recall_storage_uri,\n metadata_storage_path=recall_storage_path,\n )\n\n typer.secho(f\"📖 Saving config to {config.config_path}\", fg=typer.colors.GREEN)\n config.save()\n\n # create user records\n ms = MetadataStore(config)\n user_id = uuid.UUID(config.anon_clientid)\n user = User(\n id=uuid.UUID(config.anon_clientid),\n default_preset=default_preset,\n default_persona=default_persona,\n default_human=default_human,\n default_agent=default_agent,\n )\n if ms.get_user(user_id):\n # update user\n ms.update_user(user)\n else:\n ms.create_user(user)" }, { "identifier": "list", "path": "memgpt/cli/cli_config.py", "snippet": "@app.command()\ndef list(arg: Annotated[ListChoice, typer.Argument]):\n config = MemGPTConfig.load()\n ms = MetadataStore(config)\n user_id = uuid.UUID(config.anon_clientid)\n if arg == ListChoice.agents:\n \"\"\"List all agents\"\"\"\n table = PrettyTable()\n table.field_names = [\"Name\", \"Model\", \"Persona\", \"Human\", \"Data Source\", \"Create Time\"]\n for agent in tqdm(ms.list_agents(user_id=user_id)):\n source_ids = ms.list_attached_sources(agent_id=agent.id)\n source_names = [ms.get_source(source_id=source_id).name for source_id in source_ids]\n table.add_row(\n [\n agent.name,\n agent.llm_config.model,\n agent.persona,\n agent.human,\n \",\".join(source_names),\n utils.format_datetime(agent.created_at),\n ]\n )\n print(table)\n elif arg == ListChoice.humans:\n \"\"\"List all humans\"\"\"\n table = PrettyTable()\n table.field_names = [\"Name\", \"Text\"]\n for human_file in utils.list_human_files():\n text = open(human_file, \"r\").read()\n name = os.path.basename(human_file).replace(\"txt\", \"\")\n table.add_row([name, text])\n print(table)\n elif arg == ListChoice.personas:\n \"\"\"List all personas\"\"\"\n table = PrettyTable()\n table.field_names = [\"Name\", \"Text\"]\n for persona_file in utils.list_persona_files():\n print(persona_file)\n text = open(persona_file, \"r\").read()\n name = os.path.basename(persona_file).replace(\".txt\", \"\")\n table.add_row([name, text])\n print(table)\n elif arg == ListChoice.sources:\n \"\"\"List all data sources\"\"\"\n\n # create table\n table = PrettyTable()\n table.field_names = [\"Name\", \"Created At\", \"Agents\"]\n # TODO: eventually look accross all storage connections\n # TODO: add data source stats\n # TODO: connect to agents\n\n # get all sources\n for source in ms.list_sources(user_id=user_id):\n # get attached agents\n agent_ids = ms.list_attached_agents(source_id=source.id)\n agent_names = [ms.get_agent(agent_id=agent_id).name for agent_id in agent_ids]\n\n table.add_row([source.name, utils.format_datetime(source.created_at), \",\".join(agent_names)])\n\n print(table)\n else:\n raise ValueError(f\"Unknown argument {arg}\")" }, { 
"identifier": "add", "path": "memgpt/cli/cli_config.py", "snippet": "@app.command()\ndef add(\n option: str, # [human, persona]\n name: str = typer.Option(help=\"Name of human/persona\"),\n text: str = typer.Option(None, help=\"Text of human/persona\"),\n filename: str = typer.Option(None, \"-f\", help=\"Specify filename\"),\n):\n \"\"\"Add a person/human\"\"\"\n\n if option == \"persona\":\n directory = os.path.join(MEMGPT_DIR, \"personas\")\n elif option == \"human\":\n directory = os.path.join(MEMGPT_DIR, \"humans\")\n else:\n raise ValueError(f\"Unknown kind {option}\")\n\n if filename:\n assert text is None, f\"Cannot provide both filename and text\"\n # copy file to directory\n shutil.copyfile(filename, os.path.join(directory, name))\n if text:\n assert filename is None, f\"Cannot provide both filename and text\"\n # write text to file\n with open(os.path.join(directory, name), \"w\") as f:\n f.write(text)" }, { "identifier": "delete", "path": "memgpt/cli/cli_config.py", "snippet": "@app.command()\ndef delete(option: str, name: str):\n \"\"\"Delete a source from the archival memory.\"\"\"\n\n config = MemGPTConfig.load()\n user_id = uuid.UUID(config.anon_clientid)\n ms = MetadataStore(config)\n assert ms.get_user(user_id=user_id), f\"User {user_id} does not exist\"\n\n try:\n # delete from metadata\n if option == \"source\":\n # delete metadata\n source = ms.get_source(source_name=name, user_id=user_id)\n ms.delete_source(source_id=source.id)\n\n # delete from passages\n conn = StorageConnector.get_storage_connector(TableType.PASSAGES, config, user_id=user_id)\n conn.delete({\"data_source\": name})\n\n assert (\n conn.get_all({\"data_source\": name}) == []\n ), f\"Expected no passages with source {name}, but got {conn.get_all({'data_source': name})}\"\n\n # TODO: should we also delete from agents?\n elif option == \"agent\":\n agent = ms.get_agent(agent_name=name, user_id=user_id)\n\n # recall memory\n recall_conn = StorageConnector.get_storage_connector(TableType.RECALL_MEMORY, config, user_id=user_id, agent_id=agent.id)\n recall_conn.delete({\"agent_id\": agent.id})\n\n # archival memory\n archival_conn = StorageConnector.get_storage_connector(TableType.ARCHIVAL_MEMORY, config, user_id=user_id, agent_id=agent.id)\n archival_conn.delete({\"agent_id\": agent.id})\n\n # metadata\n ms.delete_agent(agent_id=agent.id)\n\n else:\n raise ValueError(f\"Option {option} not implemented\")\n\n typer.secho(f\"Deleted source '{name}'\", fg=typer.colors.GREEN)\n\n except Exception as e:\n typer.secho(f\"Failed to deleted source '{name}'\\n{e}\", fg=typer.colors.RED)" }, { "identifier": "app", "path": "memgpt/cli/cli_load.py", "snippet": "def insert_passages_into_source(passages: List[Passage], source_name: str, user_id: uuid.UUID, config: MemGPTConfig):\ndef insert_passages_into_source(passages: List[Passage], source_name: str, user_id: uuid.UUID, config: MemGPTConfig):\ndef store_docs(name, docs, user_id=None, show_progress=True):\ndef load_index(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n dir: str = typer.Option(help=\"Path to directory containing index.\"),\n user_id: uuid.UUID = None,\n):\ndef load_directory(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n input_dir: str = typer.Option(None, help=\"Path to directory containing dataset.\"),\n input_files: List[str] = typer.Option(None, help=\"List of paths to files containing dataset.\"),\n recursive: bool = typer.Option(False, help=\"Recursively search for files in directory.\"),\n extensions: str = 
typer.Option(default_extensions, help=\"Comma separated list of file extensions to load\"),\n user_id: str = typer.Option(None, help=\"User ID to associate with dataset.\"),\n):\ndef load_webpage(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n urls: List[str] = typer.Option(None, help=\"List of urls to load.\"),\n):\ndef load_database(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n query: str = typer.Option(help=\"Database query.\"),\n dump_path: str = typer.Option(None, help=\"Path to dump file.\"),\n scheme: str = typer.Option(None, help=\"Database scheme.\"),\n host: str = typer.Option(None, help=\"Database host.\"),\n port: int = typer.Option(None, help=\"Database port.\"),\n user: str = typer.Option(None, help=\"Database user.\"),\n password: str = typer.Option(None, help=\"Database password.\"),\n dbname: str = typer.Option(None, help=\"Database name.\"),\n):\ndef load_vector_database(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n uri: str = typer.Option(help=\"Database URI.\"),\n table_name: str = typer.Option(help=\"Name of table containing data.\"),\n text_column: str = typer.Option(help=\"Name of column containing text.\"),\n embedding_column: str = typer.Option(help=\"Name of column containing embedding.\"),\n user_id: uuid.UUID = None,\n):" }, { "identifier": "StorageConnector", "path": "memgpt/agent_store/storage.py", "snippet": "class StorageConnector:\n \"\"\"Defines a DB connection that is user-specific to access data: Documents, Passages, Archival/Recall Memory\"\"\"\n\n def __init__(self, table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n self.user_id = user_id\n self.agent_id = agent_id\n self.table_type = table_type\n\n # get object type\n if table_type == TableType.ARCHIVAL_MEMORY:\n self.type = Passage\n self.table_name = ARCHIVAL_TABLE_NAME\n elif table_type == TableType.RECALL_MEMORY:\n self.type = Message\n self.table_name = RECALL_TABLE_NAME\n elif table_type == TableType.DOCUMENTS:\n self.type = Document\n self.table_name == DOCUMENT_TABLE_NAME\n elif table_type == TableType.PASSAGES:\n self.type = Passage\n self.table_name = PASSAGE_TABLE_NAME\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n printd(f\"Using table name {self.table_name}\")\n\n # setup base filters for agent-specific tables\n if self.table_type == TableType.ARCHIVAL_MEMORY or self.table_type == TableType.RECALL_MEMORY:\n # agent-specific table\n assert agent_id is not None, \"Agent ID must be provided for agent-specific tables\"\n self.filters = {\"user_id\": self.user_id, \"agent_id\": self.agent_id}\n elif self.table_type == TableType.PASSAGES or self.table_type == TableType.DOCUMENTS:\n # setup base filters for user-specific tables\n assert agent_id is None, \"Agent ID must not be provided for user-specific tables\"\n self.filters = {\"user_id\": self.user_id}\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n def get_filters(self, filters: Optional[Dict] = {}):\n # get all filters for query\n if filters is not None:\n filter_conditions = {**self.filters, **filters}\n else:\n filter_conditions = self.filters\n return filter_conditions\n\n @staticmethod\n def get_storage_connector(table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:\n storage_type = config.archival_storage_type\n elif table_type == TableType.RECALL_MEMORY:\n storage_type = config.recall_storage_type\n 
else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n if storage_type == \"postgres\":\n from memgpt.agent_store.db import PostgresStorageConnector\n\n return PostgresStorageConnector(table_type, config, user_id, agent_id)\n elif storage_type == \"chroma\":\n from memgpt.agent_store.chroma import ChromaStorageConnector\n\n return ChromaStorageConnector(table_type, config, user_id, agent_id)\n\n # TODO: add back\n # elif storage_type == \"lancedb\":\n # from memgpt.agent_store.db import LanceDBConnector\n\n # return LanceDBConnector(agent_config=agent_config, table_type=table_type)\n\n elif storage_type == \"sqlite\":\n from memgpt.agent_store.db import SQLLiteStorageConnector\n\n return SQLLiteStorageConnector(table_type, config, user_id, agent_id)\n\n else:\n raise NotImplementedError(f\"Storage type {storage_type} not implemented\")\n\n @staticmethod\n def get_archival_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.ARCHIVAL_MEMORY, config, user_id, agent_id)\n\n @staticmethod\n def get_recall_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.RECALL_MEMORY, config, user_id, agent_id)\n\n @abstractmethod\n def get_filters(self, filters: Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:\n pass\n\n @abstractmethod\n def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:\n pass\n\n @abstractmethod\n def get(self, id: str) -> Optional[Record]:\n pass\n\n @abstractmethod\n def size(self, filters: Optional[Dict] = {}) -> int:\n pass\n\n @abstractmethod\n def insert(self, record: Record):\n pass\n\n @abstractmethod\n def insert_many(self, records: List[Record], show_progress=False):\n pass\n\n @abstractmethod\n def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:\n pass\n\n @abstractmethod\n def query_date(self, start_date, end_date):\n pass\n\n @abstractmethod\n def query_text(self, query):\n pass\n\n @abstractmethod\n def delete_table(self):\n pass\n\n @abstractmethod\n def delete(self, filters: Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def save(self):\n pass" }, { "identifier": "TableType", "path": "memgpt/agent_store/storage.py", "snippet": "class TableType:\n ARCHIVAL_MEMORY = \"archival_memory\" # recall memory table: memgpt_agent_{agent_id}\n RECALL_MEMORY = \"recall_memory\" # archival memory table: memgpt_agent_recall_{agent_id}\n PASSAGES = \"passages\" # TODO\n DOCUMENTS = \"documents\" # TODO" }, { "identifier": "MetadataStore", "path": "memgpt/metadata.py", "snippet": "class MetadataStore:\n def __init__(self, config: MemGPTConfig):\n # TODO: get DB URI or path\n if config.metadata_storage_type == \"postgres\":\n self.uri = config.metadata_storage_uri\n elif config.metadata_storage_type == \"sqlite\":\n path = os.path.join(config.metadata_storage_path, \"sqlite.db\")\n self.uri = f\"sqlite:///{path}\"\n else:\n raise ValueError(f\"Invalid metadata storage type: {config.metadata_storage_type}\")\n\n # TODO: check to see if table(s) need to be greated or not\n\n self.engine = create_engine(self.uri)\n Base.metadata.create_all(\n self.engine, tables=[UserModel.__table__, AgentModel.__table__, SourceModel.__table__, AgentSourceMappingModel.__table__]\n )\n session_maker = sessionmaker(bind=self.engine)\n 
self.session = session_maker()\n\n @enforce_types\n def create_agent(self, agent: AgentState):\n # insert into agent table\n # make sure agent.name does not already exist for user user_id\n if self.session.query(AgentModel).filter(AgentModel.name == agent.name).filter(AgentModel.user_id == agent.user_id).count() > 0:\n raise ValueError(f\"Agent with name {agent.name} already exists\")\n self.session.add(AgentModel(**vars(agent)))\n self.session.commit()\n\n @enforce_types\n def create_source(self, source: Source):\n # make sure source.name does not already exist for user\n if (\n self.session.query(SourceModel).filter(SourceModel.name == source.name).filter(SourceModel.user_id == source.user_id).count()\n > 0\n ):\n raise ValueError(f\"Source with name {source.name} already exists\")\n self.session.add(SourceModel(**vars(source)))\n self.session.commit()\n\n @enforce_types\n def create_user(self, user: User):\n if self.session.query(UserModel).filter(UserModel.id == user.id).count() > 0:\n raise ValueError(f\"User with id {user.id} already exists\")\n self.session.add(UserModel(**vars(user)))\n self.session.commit()\n\n @enforce_types\n def update_agent(self, agent: AgentState):\n self.session.query(AgentModel).filter(AgentModel.id == agent.id).update(vars(agent))\n self.session.commit()\n\n @enforce_types\n def update_user(self, user: User):\n self.session.query(UserModel).filter(UserModel.id == user.id).update(vars(user))\n self.session.commit()\n\n @enforce_types\n def update_source(self, source: Source):\n self.session.query(SourceModel).filter(SourceModel.id == source.id).update(vars(source))\n self.session.commit()\n\n @enforce_types\n def delete_agent(self, agent_id: uuid.UUID):\n self.session.query(AgentModel).filter(AgentModel.id == agent_id).delete()\n self.session.commit()\n\n @enforce_types\n def delete_source(self, source_id: uuid.UUID):\n # delete from sources table\n self.session.query(SourceModel).filter(SourceModel.id == source_id).delete()\n\n # delete any mappings\n self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.source_id == source_id).delete()\n\n self.session.commit()\n\n @enforce_types\n def delete_user(self, user_id: uuid.UUID):\n # delete from users table\n self.session.query(UserModel).filter(UserModel.id == user_id).delete()\n\n # delete associated agents\n self.session.query(AgentModel).filter(AgentModel.user_id == user_id).delete()\n\n # delete associated sources\n self.session.query(SourceModel).filter(SourceModel.user_id == user_id).delete()\n\n # delete associated mappings\n self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.user_id == user_id).delete()\n\n self.session.commit()\n\n @enforce_types\n def list_agents(self, user_id: uuid.UUID) -> List[AgentState]:\n results = self.session.query(AgentModel).filter(AgentModel.user_id == user_id).all()\n return [r.to_record() for r in results]\n\n @enforce_types\n def list_sources(self, user_id: uuid.UUID) -> List[Source]:\n results = self.session.query(SourceModel).filter(SourceModel.user_id == user_id).all()\n return [r.to_record() for r in results]\n\n @enforce_types\n def get_agent(\n self, agent_id: Optional[uuid.UUID] = None, agent_name: Optional[str] = None, user_id: Optional[uuid.UUID] = None\n ) -> Optional[AgentState]:\n if agent_id:\n results = self.session.query(AgentModel).filter(AgentModel.id == agent_id).all()\n else:\n assert agent_name is not None and user_id is not None, \"Must provide either agent_id or agent_name\"\n results = 
self.session.query(AgentModel).filter(AgentModel.name == agent_name).filter(AgentModel.user_id == user_id).all()\n\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\" # should only be one result\n return results[0].to_record()\n\n @enforce_types\n def get_user(self, user_id: uuid.UUID) -> Optional[User]:\n results = self.session.query(UserModel).filter(UserModel.id == user_id).all()\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\"\n return results[0].to_record()\n\n @enforce_types\n def get_source(\n self, source_id: Optional[uuid.UUID] = None, user_id: Optional[uuid.UUID] = None, source_name: Optional[str] = None\n ) -> Optional[Source]:\n if source_id:\n results = self.session.query(SourceModel).filter(SourceModel.id == source_id).all()\n else:\n assert user_id is not None and source_name is not None\n results = self.session.query(SourceModel).filter(SourceModel.name == source_name).filter(SourceModel.user_id == user_id).all()\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\"\n return results[0].to_record()\n\n # agent source metadata\n @enforce_types\n def attach_source(self, user_id: uuid.UUID, agent_id: uuid.UUID, source_id: uuid.UUID):\n self.session.add(AgentSourceMappingModel(user_id=user_id, agent_id=agent_id, source_id=source_id))\n self.session.commit()\n\n @enforce_types\n def list_attached_sources(self, agent_id: uuid.UUID) -> List[Column]:\n results = self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.agent_id == agent_id).all()\n return [r.source_id for r in results]\n\n @enforce_types\n def list_attached_agents(self, source_id: uuid.UUID):\n results = self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.source_id == source_id).all()\n return [r.agent_id for r in results]\n\n @enforce_types\n def detach_source(self, agent_id: uuid.UUID, source_id: uuid.UUID):\n self.session.query(AgentSourceMappingModel).filter(\n AgentSourceMappingModel.agent_id == agent_id, AgentSourceMappingModel.source_id == source_id\n ).delete()\n self.session.commit()" }, { "identifier": "save_agent", "path": "memgpt/metadata.py", "snippet": "def save_agent(agent: Agent, ms: MetadataStore):\n \"\"\"Save agent to metadata store\"\"\"\n\n agent.update_state()\n agent_state = agent.agent_state\n\n if ms.get_agent(agent_id=agent_state.id):\n ms.update_agent(agent_state)\n else:\n ms.create_agent(agent_state)" } ]
import shutil
import configparser
import uuid
import logging
import glob
import os
import sys
import pickle
import traceback
import json

import questionary
import typer

import memgpt.agent as agent
import memgpt.system as system
import memgpt.constants as constants
import memgpt.errors as errors

from rich.console import Console
from prettytable import PrettyTable

from memgpt.log import logger
from memgpt.interface import CLIInterface as interface  # for printing to terminal
from memgpt.config import MemGPTConfig
from memgpt.cli.cli import run, attach, version, server, open_folder, quickstart, migrate
from memgpt.cli.cli_config import configure, list, add, delete
from memgpt.cli.cli_load import app as load_app
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.metadata import MetadataStore, save_agent
17,393
console = Console()

app = typer.Typer(pretty_exceptions_enable=False)
app.command(name="run")(run)
app.command(name="version")(version)
console = Console()

app = typer.Typer(pretty_exceptions_enable=False)
app.command(name="run")(run)
app.command(name="version")(version)
app.command(name="attach")(attach)
4
2023-10-11 07:38:37+00:00
24k
PixArt-alpha/PixArt-alpha
train_scripts/train_pixart_lcm.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_sigmas=False,\n diffusion_steps=1000,\n snr=False,\n return_startx=False,\n):\n betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)\n if use_kl:\n loss_type = gd.LossType.RESCALED_KL\n elif rescale_learned_sigmas:\n loss_type = gd.LossType.RESCALED_MSE\n else:\n loss_type = gd.LossType.MSE\n if timestep_respacing is None or timestep_respacing == \"\":\n timestep_respacing = [diffusion_steps]\n return SpacedDiffusion(\n use_timesteps=space_timesteps(diffusion_steps, timestep_respacing),\n betas=betas,\n model_mean_type=(\n gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X\n ),\n model_var_type=(\n ((\n gd.ModelVarType.FIXED_LARGE\n if not sigma_small\n else gd.ModelVarType.FIXED_SMALL\n )\n if not learn_sigma\n else gd.ModelVarType.LEARNED_RANGE\n )\n if pred_sigma\n else None\n ),\n loss_type=loss_type,\n snr=snr,\n return_startx=return_startx,\n # rescale_timesteps=rescale_timesteps,\n )" }, { "identifier": "save_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def save_checkpoint(work_dir,\n epoch,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n keep_last=False,\n step=None,\n ):\n os.makedirs(work_dir, exist_ok=True)\n state_dict = dict(state_dict=model.state_dict())\n if model_ema is not None:\n state_dict['state_dict_ema'] = model_ema.state_dict()\n if optimizer is not None:\n state_dict['optimizer'] = optimizer.state_dict()\n if lr_scheduler is not None:\n state_dict['scheduler'] = lr_scheduler.state_dict()\n if epoch is not None:\n state_dict['epoch'] = epoch\n file_path = os.path.join(work_dir, f\"epoch_{epoch}.pth\")\n if step is not None:\n file_path = file_path.split('.pth')[0] + f\"_step_{step}.pth\"\n logger = get_root_logger()\n torch.save(state_dict, file_path)\n logger.info(f'Saved checkpoint of epoch {epoch} to {file_path.format(epoch)}.')\n if keep_last:\n for i in range(epoch):\n previous_ckgt = file_path.format(i)\n if os.path.exists(previous_ckgt):\n os.remove(previous_ckgt)" }, { "identifier": "load_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def load_checkpoint(checkpoint,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n load_ema=False,\n resume_optimizer=True,\n resume_lr_scheduler=True\n ):\n assert isinstance(checkpoint, str)\n ckpt_file = checkpoint\n checkpoint = torch.load(ckpt_file, map_location=\"cpu\")\n\n state_dict_keys = ['pos_embed', 'base_model.pos_embed', 'model.pos_embed']\n for key in state_dict_keys:\n if key in checkpoint['state_dict']:\n del checkpoint['state_dict'][key]\n if 'state_dict_ema' in checkpoint and key in checkpoint['state_dict_ema']:\n del checkpoint['state_dict_ema'][key]\n break\n\n if load_ema:\n state_dict = checkpoint['state_dict_ema']\n else:\n state_dict = checkpoint.get('state_dict', checkpoint) # to be compatible with the official checkpoint\n # model.load_state_dict(state_dict)\n missing, unexpect = model.load_state_dict(state_dict, strict=False)\n if model_ema is not None:\n model_ema.load_state_dict(checkpoint['state_dict_ema'], strict=False)\n if optimizer is not None and resume_optimizer:\n optimizer.load_state_dict(checkpoint['optimizer'])\n if lr_scheduler is not None and resume_lr_scheduler:\n lr_scheduler.load_state_dict(checkpoint['scheduler'])\n logger 
= get_root_logger()\n if optimizer is not None:\n epoch = checkpoint.get('epoch', re.match(r'.*epoch_(\\d*).*.pth', ckpt_file).group()[0])\n logger.info(f'Resume checkpoint of epoch {epoch} from {ckpt_file}. Load ema: {load_ema}, '\n f'resume optimizer: {resume_optimizer}, resume lr scheduler: {resume_lr_scheduler}.')\n return epoch, missing, unexpect\n logger.info(f'Load checkpoint from {ckpt_file}. Load ema: {load_ema}.')\n return missing, unexpect" }, { "identifier": "synchronize", "path": "diffusion/utils/dist_utils.py", "snippet": "def synchronize():\n \"\"\"\n Helper function to synchronize (barrier) among all processes when\n using distributed training\n \"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n world_size = dist.get_world_size()\n if world_size == 1:\n return\n dist.barrier()" }, { "identifier": "get_world_size", "path": "diffusion/utils/dist_utils.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "clip_grad_norm_", "path": "diffusion/utils/dist_utils.py", "snippet": "@torch.no_grad()\ndef clip_grad_norm_(\n self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0\n) -> None:\n self._lazy_init()\n self._wait_for_previous_optim_step()\n assert self._is_root, \"clip_grad_norm should only be called on the root (parent) instance\"\n self._assert_state(TrainingState_.IDLE)\n\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n # Computes the max norm for this shard's gradients and sync's across workers\n local_norm = _calc_grad_norm(self.params_with_grad, norm_type).cuda() # type: ignore[arg-type]\n if norm_type == math.inf:\n total_norm = local_norm\n dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group)\n else:\n total_norm = local_norm ** norm_type\n dist.all_reduce(total_norm, group=self.process_group)\n total_norm = total_norm ** (1.0 / norm_type)\n\n clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)\n if clip_coef < 1:\n # multiply by clip_coef, aka, (max_norm/total_norm).\n for p in self.params_with_grad:\n assert p.grad is not None\n p.grad.detach().mul_(clip_coef.to(p.grad.device))\n return total_norm" }, { "identifier": "build_dataset", "path": "diffusion/data/builder.py", "snippet": "def build_dataset(cfg, resolution=224, **kwargs):\n logger = get_root_logger()\n\n dataset_type = cfg.get('type')\n logger.info(f\"Constructing dataset {dataset_type}...\")\n t = time.time()\n transform = cfg.pop('transform', 'default_train')\n transform = get_transform(transform, resolution)\n dataset = build_from_cfg(cfg, DATASETS, default_args=dict(transform=transform, resolution=resolution, **kwargs))\n logger.info(f\"Dataset {dataset_type} constructed. 
time: {(time.time() - t):.2f} s, length (use/ori): {len(dataset)}/{dataset.ori_imgs_nums}\")\n return dataset" }, { "identifier": "build_dataloader", "path": "diffusion/data/builder.py", "snippet": "def build_dataloader(dataset, batch_size=256, num_workers=4, shuffle=True, **kwargs):\n if 'batch_sampler' in kwargs:\n dataloader = DataLoader(dataset, batch_sampler=kwargs['batch_sampler'], num_workers=num_workers, pin_memory=True)\n else:\n dataloader = DataLoader(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=True,\n **kwargs)\n return dataloader" }, { "identifier": "set_data_root", "path": "diffusion/data/builder.py", "snippet": "def set_data_root(data_root):\n global DATA_ROOT\n DATA_ROOT = data_root" }, { "identifier": "build_model", "path": "diffusion/model/builder.py", "snippet": "def build_model(cfg, use_grad_checkpoint=False, use_fp32_attention=False, gc_step=1, **kwargs):\n if isinstance(cfg, str):\n cfg = dict(type=cfg)\n model = MODELS.build(cfg, default_args=kwargs)\n if use_grad_checkpoint:\n set_grad_checkpoint(model, use_fp32_attention=use_fp32_attention, gc_step=gc_step)\n return model" }, { "identifier": "get_root_logger", "path": "diffusion/utils/logger.py", "snippet": "def get_root_logger(log_file=None, log_level=logging.INFO, name='PixArt'):\n \"\"\"Get root logger.\n\n Args:\n log_file (str, optional): File path of log. Defaults to None.\n log_level (int, optional): The level of logger.\n Defaults to logging.INFO.\n name (str): logger name\n Returns:\n :obj:`logging.Logger`: The obtained logger\n \"\"\"\n if log_file is None:\n log_file = '/dev/null'\n logger = get_logger(name=name, log_file=log_file, log_level=log_level)\n return logger" }, { "identifier": "set_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "read_config", "path": "diffusion/utils/misc.py", "snippet": "def read_config(file):\n # solve config loading conflict when multi-processes\n import time\n while True:\n config = Config.fromfile(file)\n if len(config) == 0:\n time.sleep(0.1)\n continue\n break\n return config" }, { "identifier": "init_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def init_random_seed(seed=None, device='cuda'):\n \"\"\"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n\n Args:\n seed (int, Optional): The seed. Default to None.\n device (str): The device where the seed will be put on.\n Default to 'cuda'.\n\n Returns:\n int: Seed to be used.\n \"\"\"\n if seed is not None:\n return seed\n\n # Make sure all ranks share the same random seed to prevent\n # some potential bugs. 
Please refer to\n # https://github.com/open-mmlab/mmdetection/issues/6339\n rank, world_size = get_dist_info()\n seed = np.random.randint(2 ** 31)\n if world_size == 1:\n return seed\n\n if rank == 0:\n random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n else:\n random_num = torch.tensor(0, dtype=torch.int32, device=device)\n dist.broadcast(random_num, src=0)\n return random_num.item()" }, { "identifier": "DebugUnderflowOverflow", "path": "diffusion/utils/misc.py", "snippet": "class DebugUnderflowOverflow:\n \"\"\"\n This debug class helps detect and understand where the model starts getting very large or very small, and more\n importantly `nan` or `inf` weight and activation elements.\n There are 2 working modes:\n 1. Underflow/overflow detection (default)\n 2. Specific batch absolute min/max tracing without detection\n Mode 1: Underflow/overflow detection\n To activate the underflow/overflow detection, initialize the object with the model :\n ```python\n debug_overflow = DebugUnderflowOverflow(model)\n ```\n then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or\n output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this\n event, each frame reporting\n 1. the fully qualified module name plus the class name whose `forward` was run\n 2. the absolute min and max value of all elements for each module weights, and the inputs and output\n For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 mixed precision :\n ```\n Detected inf/nan during batch_number=0\n Last 21 forward frames:\n abs min abs max metadata\n [...]\n encoder.block.2.layer.1.DenseReluDense.wi_0 Linear\n 2.17e-07 4.50e+00 weight\n 1.79e-06 4.65e+00 input[0]\n 2.68e-06 3.70e+01 output\n encoder.block.2.layer.1.DenseReluDense.wi_1 Linear\n 8.08e-07 2.66e+01 weight\n 1.79e-06 4.65e+00 input[0]\n 1.27e-04 2.37e+02 output\n encoder.block.2.layer.1.DenseReluDense.wo Linear\n 1.01e-06 6.44e+00 weight\n 0.00e+00 9.74e+03 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense\n 1.79e-06 4.65e+00 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.dropout Dropout\n 3.18e-04 6.27e+04 input[0]\n 0.00e+00 inf output\n ```\n You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value\n was around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which\n renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than\n 64K, and we get an overlow.\n As you can see it's the previous frames that we need to look into when the numbers start going into very large for\n fp16 numbers.\n The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.\n By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)\n ```\n To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may\n take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next\n section.\n Mode 2. 
Specific batch absolute min/max tracing without detection\n The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.\n Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a\n given batch, and only do that for batches 1 and 3. Then you instantiate this class as :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])\n ```\n And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.\n This is helpful if you know that the program starts misbehaving after a certain batch number, so you can\n fast-forward right to that area.\n Early stopping:\n You can also specify the batch number after which to stop the training, with :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3], abort_after_batch_num=3)\n ```\n This feature is mainly useful in the tracing mode, but you can use it for any mode.\n **Performance**:\n As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the\n training down. Therefore remember to turn it off once the debugging needs have been met.\n Args:\n model (`nn.Module`):\n The model to debug.\n max_frames_to_save (`int`, *optional*, defaults to 21):\n How many frames back to record\n trace_batch_nums(`List[int]`, *optional*, defaults to `[]`):\n Which batch numbers to trace (turns detection off)\n abort_after_batch_num (`int``, *optional*):\n Whether to abort after a certain batch number has finished\n \"\"\"\n\n def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):\n self.model = model\n self.trace_batch_nums = trace_batch_nums\n self.abort_after_batch_num = abort_after_batch_num\n\n # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence\n self.frames = collections.deque([], max_frames_to_save)\n self.frame = []\n self.batch_number = 0\n self.total_calls = 0\n self.detected_overflow = False\n self.prefix = \" \"\n\n self.analyse_model()\n\n self.register_forward_hook()\n\n def save_frame(self, frame=None):\n if frame is not None:\n self.expand_frame(frame)\n self.frames.append(\"\\n\".join(self.frame))\n self.frame = [] # start a new frame\n\n def expand_frame(self, line):\n self.frame.append(line)\n\n def trace_frames(self):\n print(\"\\n\".join(self.frames))\n self.frames = []\n\n def reset_saved_frames(self):\n self.frames = []\n\n def dump_saved_frames(self):\n print(f\"\\nDetected inf/nan during batch_number={self.batch_number} \"\n f\"Last {len(self.frames)} forward frames:\"\n f\"{'abs min':8} {'abs max':8} metadata\"\n f\"'\\n'.join(self.frames)\"\n f\"\\n\\n\")\n self.frames = []\n\n def analyse_model(self):\n # extract the fully qualified module names, to be able to report at run time. 
e.g.:\n # encoder.block.2.layer.0.SelfAttention.o\n #\n # for shared weights only the first shared module name will be registered\n self.module_names = {m: name for name, m in self.model.named_modules()}\n # self.longest_module_name = max(len(v) for v in self.module_names.values())\n\n def analyse_variable(self, var, ctx):\n if torch.is_tensor(var):\n self.expand_frame(self.get_abs_min_max(var, ctx))\n if self.detect_overflow(var, ctx):\n self.detected_overflow = True\n elif var is None:\n self.expand_frame(f\"{'None':>17} {ctx}\")\n else:\n self.expand_frame(f\"{'not a tensor':>17} {ctx}\")\n\n def batch_start_frame(self):\n self.expand_frame(f\"\\n\\n{self.prefix} *** Starting batch number={self.batch_number} ***\")\n self.expand_frame(f\"{'abs min':8} {'abs max':8} metadata\")\n\n def batch_end_frame(self):\n self.expand_frame(f\"{self.prefix} *** Finished batch number={self.batch_number - 1} ***\\n\\n\")\n\n def create_frame(self, module, input, output):\n self.expand_frame(f\"{self.prefix} {self.module_names[module]} {module.__class__.__name__}\")\n\n # params\n for name, p in module.named_parameters(recurse=False):\n self.analyse_variable(p, name)\n\n # inputs\n if isinstance(input, tuple):\n for i, x in enumerate(input):\n self.analyse_variable(x, f\"input[{i}]\")\n else:\n self.analyse_variable(input, \"input\")\n\n # outputs\n if isinstance(output, tuple):\n for i, x in enumerate(output):\n # possibly a tuple of tuples\n if isinstance(x, tuple):\n for j, y in enumerate(x):\n self.analyse_variable(y, f\"output[{i}][{j}]\")\n else:\n self.analyse_variable(x, f\"output[{i}]\")\n else:\n self.analyse_variable(output, \"output\")\n\n self.save_frame()\n\n def register_forward_hook(self):\n self.model.apply(self._register_forward_hook)\n\n def _register_forward_hook(self, module):\n module.register_forward_hook(self.forward_hook)\n\n def forward_hook(self, module, input, output):\n # - input is a tuple of packed inputs (could be non-Tensors)\n # - output could be a Tensor or a tuple of Tensors and non-Tensors\n\n last_frame_of_batch = False\n\n trace_mode = True if self.batch_number in self.trace_batch_nums else False\n if trace_mode:\n self.reset_saved_frames()\n\n if self.total_calls == 0:\n self.batch_start_frame()\n self.total_calls += 1\n\n # count batch numbers - the very first forward hook of the batch will be called when the\n # batch completes - i.e. it gets called very last - we know this batch has finished\n if module == self.model:\n self.batch_number += 1\n last_frame_of_batch = True\n\n self.create_frame(module, input, output)\n\n # if last_frame_of_batch:\n # self.batch_end_frame()\n\n if trace_mode:\n self.trace_frames()\n\n if last_frame_of_batch:\n self.batch_start_frame()\n\n if self.detected_overflow and not trace_mode:\n self.dump_saved_frames()\n\n # now we can abort, as it's pointless to continue running\n raise ValueError(\n \"DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. 
\"\n \"Please scroll up above this traceback to see the activation values prior to this event.\"\n )\n\n # abort after certain batch if requested to do so\n if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:\n raise ValueError(\n f\"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg\"\n )\n\n @staticmethod\n def get_abs_min_max(var, ctx):\n abs_var = var.abs()\n return f\"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}\"\n\n @staticmethod\n def detect_overflow(var, ctx):\n \"\"\"\n Report whether the tensor contains any `nan` or `inf` entries.\n This is useful for detecting overflows/underflows and best to call right after the function that did some math that\n modified the tensor in question.\n This function contains a few other helper features that you can enable and tweak directly if you want to track\n various other things.\n Args:\n var: the tensor variable to check\n ctx: the message to print as a context\n Return:\n `True` if `inf` or `nan` was detected, `False` otherwise\n \"\"\"\n detected = False\n if torch.isnan(var).any().item():\n detected = True\n print(f\"{ctx} has nans\")\n if torch.isinf(var).any().item():\n detected = True\n print(f\"{ctx} has infs\")\n if var.dtype == torch.float32 and torch.ge(var.abs(), 65535).any().item():\n detected = True\n print(f\"{ctx} has overflow values {var.abs().max().item()}.\")\n # if needed to monitor large elements can enable the following\n if 0: # and detected:\n n100 = var[torch.ge(var.abs(), 100)]\n if n100.numel() > 0:\n print(f\"{ctx}: n100={n100.numel()}\")\n n1000 = var[torch.ge(var.abs(), 1000)]\n if n1000.numel() > 0:\n print(f\"{ctx}: n1000={n1000.numel()}\")\n n10000 = var[torch.ge(var.abs(), 10000)]\n if n10000.numel() > 0:\n print(f\"{ctx}: n10000={n10000.numel()}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})\")\n\n return detected" }, { "identifier": "build_optimizer", "path": "diffusion/utils/optimizer.py", "snippet": "def build_optimizer(model, optimizer_cfg):\n # default parameter-wise config\n logger = get_root_logger()\n\n if hasattr(model, 'module'):\n model = model.module\n # set optimizer constructor\n optimizer_cfg.setdefault('constructor', 'MyOptimizerConstructor')\n # parameter-wise setting: cancel weight decay for some specific modules\n custom_keys = dict()\n for name, module in model.named_modules():\n if hasattr(module, 'zero_weight_decay'):\n custom_keys.update({(name, key): dict(decay_mult=0) for key in module.zero_weight_decay})\n\n paramwise_cfg = Config(dict(cfg=dict(custom_keys=custom_keys)))\n given_cfg = optimizer_cfg.get('paramwise_cfg')\n if given_cfg:\n paramwise_cfg.merge_from_dict(dict(cfg=given_cfg))\n optimizer_cfg['paramwise_cfg'] = paramwise_cfg.cfg\n # build optimizer\n optimizer = mm_build_optimizer(model, optimizer_cfg)\n\n weight_decay_groups = dict()\n lr_groups = dict()\n for group in optimizer.param_groups:\n if not group.get('requires_grad', True): continue\n lr_groups.setdefault(group['lr'], []).append(group)\n weight_decay_groups.setdefault(group['weight_decay'], []).append(group)\n\n learnable_count, fix_count = 0, 0\n for p in model.parameters():\n if p.requires_grad:\n learnable_count += 1\n else:\n fix_count += 1\n fix_info = f\"{learnable_count} are learnable, {fix_count} are fix\"\n lr_info = \"Lr group: \" + \", 
\".join([f'{len(group)} params with lr {lr:.5f}' for lr, group in lr_groups.items()])\n wd_info = \"Weight decay group: \" + \", \".join(\n [f'{len(group)} params with weight decay {wd}' for wd, group in weight_decay_groups.items()])\n opt_info = f\"Optimizer: total {len(optimizer.param_groups)} param groups, {fix_info}. {lr_info}; {wd_info}.\"\n logger.info(opt_info)\n\n return optimizer" }, { "identifier": "auto_scale_lr", "path": "diffusion/utils/optimizer.py", "snippet": "def auto_scale_lr(effective_bs, optimizer_cfg, rule='linear', base_batch_size=256):\n assert rule in ['linear', 'sqrt']\n logger = get_root_logger()\n # scale by world size\n if rule == 'sqrt':\n scale_ratio = math.sqrt(effective_bs / base_batch_size)\n elif rule == 'linear':\n scale_ratio = effective_bs / base_batch_size\n optimizer_cfg['lr'] *= scale_ratio\n logger.info(f'Automatically adapt lr to {optimizer_cfg[\"lr\"]:.7f} (using {rule} scaling rule).')\n return scale_ratio" }, { "identifier": "build_lr_scheduler", "path": "diffusion/utils/lr_scheduler.py", "snippet": "def build_lr_scheduler(config, optimizer, train_dataloader, lr_scale_ratio):\n if not config.get('lr_schedule_args', None):\n config.lr_schedule_args = dict()\n if config.get('lr_warmup_steps', None):\n config['num_warmup_steps'] = config.get('lr_warmup_steps') # for compatibility with old version\n\n logger = get_root_logger()\n logger.info(\n f'Lr schedule: {config.lr_schedule}, ' + \",\".join(\n [f\"{key}:{value}\" for key, value in config.lr_schedule_args.items()]) + '.')\n if config.lr_schedule == 'cosine':\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n elif config.lr_schedule == 'constant':\n lr_scheduler = get_constant_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n )\n elif config.lr_schedule == 'cosine_decay_to_constant':\n assert lr_scale_ratio >= 1\n lr_scheduler = get_cosine_decay_to_constant_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n final_lr=1 / lr_scale_ratio,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n else:\n raise RuntimeError(f'Unrecognized lr schedule {config.lr_schedule}.')\n return lr_scheduler" }, { "identifier": "AspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class AspectRatioBatchSampler(BatchSampler):\n \"\"\"A sampler wrapper for grouping images with similar aspect ratio into a same batch.\n\n Args:\n sampler (Sampler): Base sampler.\n dataset (Dataset): Dataset providing data information.\n batch_size (int): Size of mini-batch.\n drop_last (bool): If ``True``, the sampler will drop the last batch if\n its size would be less than ``batch_size``.\n aspect_ratios (dict): The predefined aspect ratios.\n \"\"\"\n\n def __init__(self,\n sampler: Sampler,\n dataset: Dataset,\n batch_size: int,\n aspect_ratios: dict,\n drop_last: bool = False,\n config=None,\n valid_num=0, # take as valid aspect-ratio when sample number >= valid_num\n **kwargs) -> None:\n if not isinstance(sampler, Sampler):\n raise TypeError('sampler should be an instance of ``Sampler``, '\n f'but got {sampler}')\n if not isinstance(batch_size, int) or batch_size <= 0:\n raise ValueError('batch_size should be a positive integer value, '\n f'but got batch_size={batch_size}')\n self.sampler = sampler\n self.dataset = dataset\n self.batch_size = batch_size\n self.aspect_ratios = aspect_ratios\n self.drop_last = drop_last\n 
self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n self.config = config\n assert self.ratio_nums_gt\n # buckets for each aspect ratio\n self._aspect_ratio_buckets = {ratio: [] for ratio in aspect_ratios.keys()}\n self.current_available_bucket_keys = [str(k) for k, v in self.ratio_nums_gt.items() if v >= valid_num]\n logger = get_root_logger() if config is None else get_root_logger(os.path.join(config.work_dir, 'train_log.log'))\n logger.warning(f\"Using valid_num={valid_num} in config file. Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n # find the closest aspect ratio\n closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n bucket = self._aspect_ratio_buckets[closest_ratio]\n bucket.append(idx)\n # yield a batch of indices in the same aspect ratio group\n if len(bucket) == self.batch_size:\n yield bucket[:]\n del bucket[:]\n\n # yield the rest data and reset the buckets\n for bucket in self._aspect_ratio_buckets.values():\n while len(bucket) > 0:\n if len(bucket) <= self.batch_size:\n if not self.drop_last:\n yield bucket[:]\n bucket = []\n else:\n yield bucket[:self.batch_size]\n bucket = bucket[self.batch_size:]" }, { "identifier": "BalancedAspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class BalancedAspectRatioBatchSampler(AspectRatioBatchSampler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Assign samples to each bucket\n self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n assert self.ratio_nums_gt\n self._aspect_ratio_buckets = {float(ratio): [] for ratio in self.aspect_ratios.keys()}\n self.original_buckets = {}\n self.current_available_bucket_keys = [k for k, v in self.ratio_nums_gt.items() if v >= 3000]\n self.all_available_keys = deepcopy(self.current_available_bucket_keys)\n self.exhausted_bucket_keys = []\n self.total_batches = len(self.sampler) // self.batch_size\n self._aspect_ratio_count = {}\n for k in self.all_available_keys:\n self._aspect_ratio_count[float(k)] = 0\n self.original_buckets[float(k)] = []\n logger = get_root_logger(os.path.join(self.config.work_dir, 'train_log.log'))\n logger.warning(f\"Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n i = 0\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n closest_ratio = float(min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio)))\n if closest_ratio not in self.all_available_keys:\n continue\n if self._aspect_ratio_count[closest_ratio] < self.ratio_nums_gt[closest_ratio]:\n self._aspect_ratio_count[closest_ratio] += 1\n self._aspect_ratio_buckets[closest_ratio].append(idx)\n self.original_buckets[closest_ratio].append(idx) # Save the original samples for each bucket\n if not self.current_available_bucket_keys:\n self.current_available_bucket_keys, self.exhausted_bucket_keys = self.exhausted_bucket_keys, []\n\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n key = closest_ratio\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) == 
self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n i += 1\n self.exhausted_bucket_keys.append(key)\n self.current_available_bucket_keys.remove(key)\n\n for _ in range(self.total_batches - i):\n key = choice(self.all_available_keys)\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) >= self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n\n # If a bucket is exhausted\n if not bucket:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])\n else:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])" }, { "identifier": "LCMScheduler", "path": "diffusion/lcm_scheduler.py", "snippet": "class LCMScheduler(SchedulerMixin, ConfigMixin):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic\n methods the library implements for all schedulers such as loading and saving.\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n beta_schedule (`str`, defaults to `\"linear\"`):\n The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from\n `linear`, `scaled_linear`, or `squaredcos_cap_v2`.\n trained_betas (`np.ndarray`, *optional*):\n Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.\n clip_sample (`bool`, defaults to `True`):\n Clip the predicted sample for numerical stability.\n clip_sample_range (`float`, defaults to 1.0):\n The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.\n set_alpha_to_one (`bool`, defaults to `True`):\n Each diffusion step uses the alphas product value at that step and at the previous one. For the final step\n there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,\n otherwise it uses the alpha value at step 0.\n steps_offset (`int`, defaults to 0):\n An offset added to the inference steps. You can use a combination of `offset=1` and\n `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable\n Diffusion.\n prediction_type (`str`, defaults to `epsilon`, *optional*):\n Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),\n `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen\n Video](https://imagen.research.google/video/paper.pdf) paper).\n thresholding (`bool`, defaults to `False`):\n Whether to use the \"dynamic thresholding\" method. This is unsuitable for latent-space diffusion models such\n as Stable Diffusion.\n dynamic_thresholding_ratio (`float`, defaults to 0.995):\n The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.\n sample_max_value (`float`, defaults to 1.0):\n The threshold value for dynamic thresholding. Valid only when `thresholding=True`.\n timestep_spacing (`str`, defaults to `\"leading\"`):\n The way the timesteps should be scaled. 
Refer to Table 2 of the [Common Diffusion Noise Schedules and\n Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.\n rescale_betas_zero_snr (`bool`, defaults to `False`):\n Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and\n dark samples instead of limiting it to samples with medium brightness. Loosely related to\n [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).\n \"\"\"\n\n # _compatibles = [e.name for e in KarrasDiffusionSchedulers]\n order = 1\n\n @register_to_config\n def __init__(\n self,\n num_train_timesteps: int = 1000,\n beta_start: float = 0.0001,\n beta_end: float = 0.02,\n beta_schedule: str = \"linear\",\n trained_betas: Optional[Union[np.ndarray, List[float]]] = None,\n clip_sample: bool = True,\n set_alpha_to_one: bool = True,\n steps_offset: int = 0,\n prediction_type: str = \"epsilon\",\n thresholding: bool = False,\n dynamic_thresholding_ratio: float = 0.995,\n clip_sample_range: float = 1.0,\n sample_max_value: float = 1.0,\n timestep_spacing: str = \"leading\",\n rescale_betas_zero_snr: bool = False,\n ):\n if trained_betas is not None:\n self.betas = torch.tensor(trained_betas, dtype=torch.float32)\n elif beta_schedule == \"linear\":\n self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)\n elif beta_schedule == \"scaled_linear\":\n # this schedule is very specific to the latent diffusion model.\n self.betas = (\n torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2\n )\n elif beta_schedule == \"squaredcos_cap_v2\":\n # Glide cosine schedule\n self.betas = betas_for_alpha_bar(num_train_timesteps)\n else:\n raise NotImplementedError(f\"{beta_schedule} does is not implemented for {self.__class__}\")\n\n # Rescale for zero SNR\n if rescale_betas_zero_snr:\n self.betas = rescale_zero_terminal_snr(self.betas)\n\n self.alphas = 1.0 - self.betas\n self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)\n\n # At every step in ddim, we are looking into the previous alphas_cumprod\n # For the final step, there is no previous alphas_cumprod because we are already at 0\n # `set_alpha_to_one` decides whether we set this parameter simply to one or\n # whether we use the final alpha of the \"non-previous\" one.\n self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]\n\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n\n # setable values\n self.num_inference_steps = None\n self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))\n\n def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:\n \"\"\"\n Ensures interchangeability with schedulers that need to scale the denoising model input depending on the\n current timestep.\n Args:\n sample (`torch.FloatTensor`):\n The input sample.\n timestep (`int`, *optional*):\n The current timestep in the diffusion chain.\n Returns:\n `torch.FloatTensor`:\n A scaled input sample.\n \"\"\"\n return sample\n\n def _get_variance(self, timestep, prev_timestep):\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n variance = 
(beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)\n\n return variance\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample\n def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n \"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the\n prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by\n s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing\n pixels from saturation at each step. We find that dynamic thresholding results in significantly better\n photorealism as well as better image-text alignment, especially when using very large guidance weights.\"\n https://arxiv.org/abs/2205.11487\n \"\"\"\n dtype = sample.dtype\n batch_size, channels, height, width = sample.shape\n\n if dtype not in (torch.float32, torch.float64):\n sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half\n\n # Flatten sample for doing quantile calculation along each image\n sample = sample.reshape(batch_size, channels * height * width)\n\n abs_sample = sample.abs() # \"a certain percentile absolute pixel value\"\n\n s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)\n s = torch.clamp(\n s, min=1, max=self.config.sample_max_value\n ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]\n\n s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0\n sample = torch.clamp(sample, -s, s) / s # \"we threshold xt0 to the range [-s, s] and then divide by s\"\n\n sample = sample.reshape(batch_size, channels, height, width)\n sample = sample.to(dtype)\n\n return sample\n\n def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n \"\"\"\n\n if num_inference_steps > self.config.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:\"\n f\" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.config.num_train_timesteps} timesteps.\"\n )\n\n self.num_inference_steps = num_inference_steps\n\n # LCM Timesteps Setting: # Linear Spacing\n c = self.config.num_train_timesteps // lcm_origin_steps\n lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1 # LCM Training Steps Schedule\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule\n\n self.timesteps = torch.from_numpy(timesteps.copy()).to(device)\n\n def get_scalings_for_boundary_condition_discrete(self, t):\n self.sigma_data = 0.5 # Default: 0.5\n\n # By dividing 0.1: This is almost a delta function at t=0.\n c_skip = self.sigma_data ** 2 / ((t / 0.1) ** 2 + self.sigma_data ** 2)\n c_out = ((t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data ** 2) ** 0.5)\n return c_skip, c_out\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timeindex: int,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n 
generator=None,\n variance_noise: Optional[torch.FloatTensor] = None,\n return_dict: bool = True,\n ) -> Union[LCMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n Args:\n model_output (`torch.FloatTensor`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n A current instance of a sample created by the diffusion process.\n eta (`float`):\n The weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`, defaults to `False`):\n If `True`, computes \"corrected\" `model_output` from the clipped predicted original sample. Necessary\n because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no\n clipping has happened, \"corrected\" `model_output` would coincide with the one provided as input and\n `use_clipped_model_output` has no effect.\n generator (`torch.Generator`, *optional*):\n A random number generator.\n variance_noise (`torch.FloatTensor`):\n Alternative to generating noise with `generator` by directly providing the noise for the variance\n itself. Useful for methods such as [`CycleDiffusion`].\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.\n Returns:\n [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:\n If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a\n tuple is returned where the first element is the sample tensor.\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # 1. get previous step value\n prev_timeindex = timeindex + 1\n if prev_timeindex < len(self.timesteps):\n prev_timestep = self.timesteps[prev_timeindex]\n else:\n prev_timestep = timestep\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n # 3. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)\n\n # 4. Different Parameterization:\n parameterization = self.config.prediction_type\n\n if parameterization == \"epsilon\": # noise-prediction\n pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()\n\n elif parameterization == \"sample\": # x-prediction\n pred_x0 = model_output\n\n elif parameterization == \"v_prediction\": # v-prediction\n pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output\n\n # 4. Denoise model output using boundary conditions\n denoised = c_out * pred_x0 + c_skip * sample\n\n # 5. 
Sample z ~ N(0, I), For MultiStep Inference\n # Noise is not used for one-step sampling.\n if len(self.timesteps) > 1:\n noise = torch.randn(model_output.shape).to(model_output.device)\n prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise\n else:\n prev_sample = denoised\n\n if not return_dict:\n return (prev_sample, denoised)\n\n return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise\n def add_noise(\n self,\n original_samples: torch.FloatTensor,\n noise: torch.FloatTensor,\n timesteps: torch.IntTensor,\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as original_samples\n alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)\n timesteps = timesteps.to(original_samples.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(original_samples.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise\n return noisy_samples\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity\n def get_velocity(\n self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as sample\n alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)\n timesteps = timesteps.to(sample.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(sample.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample\n return velocity\n\n def __len__(self):\n return self.config.num_train_timesteps" } ]
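The `LCMScheduler` snippet above describes the boundary-condition scalings and the epsilon-parameterized consistency output computed in `step`. As a rough, self-contained illustration (not the repository's API), the sketch below recomputes `c_skip`/`c_out` for one toy timestep and blends a stand-in epsilon prediction into the `denoised` output; the beta schedule and tensors are placeholders.

```python
# Hedged sketch: mirrors LCMScheduler.get_scalings_for_boundary_condition_discrete
# and the epsilon-parameterized consistency output in LCMScheduler.step,
# using toy tensors and an illustrative linear beta schedule.
import torch

def boundary_scalings(t, sigma_data=0.5, scale=0.1):
    # c_skip -> 1 and c_out -> 0 as t -> 0, so the sample passes through
    # unchanged at the boundary of the trajectory (near a delta at t = 0).
    c_skip = sigma_data**2 / ((t / scale) ** 2 + sigma_data**2)
    c_out = (t / scale) / ((t / scale) ** 2 + sigma_data**2) ** 0.5
    return c_skip, c_out

torch.manual_seed(0)
betas = torch.linspace(1e-4, 2e-2, 1000)          # stand-in "linear" schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

t = 499                                           # arbitrary mid-trajectory step
sample = torch.randn(1, 4, 8, 8)                  # stand-in latent x_t
eps_pred = torch.randn_like(sample)               # stand-in model output (epsilon)

alpha_prod_t = alphas_cumprod[t]
pred_x0 = (sample - (1 - alpha_prod_t).sqrt() * eps_pred) / alpha_prod_t.sqrt()

c_skip, c_out = boundary_scalings(torch.tensor(float(t)))
denoised = c_out * pred_x0 + c_skip * sample      # consistency-model output
print(f"c_skip={c_skip.item():.4f} c_out={c_out.item():.4f} denoised shape={tuple(denoised.shape)}")
```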
import os
import sys
import types
import argparse
import datetime
import time
import warnings
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from pathlib import Path
from accelerate import Accelerator, InitProcessGroupKwargs
from accelerate.utils import DistributedType
from diffusers.models import AutoencoderKL
from torch.utils.data import RandomSampler
from mmcv.runner import LogBuffer
from copy import deepcopy
from tqdm import tqdm
from diffusion import IDDPM
from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint
from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_
from diffusion.data.builder import build_dataset, build_dataloader, set_data_root
from diffusion.model.builder import build_model
from diffusion.utils.logger import get_root_logger
from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow
from diffusion.utils.optimizer import build_optimizer, auto_scale_lr
from diffusion.utils.lr_scheduler import build_lr_scheduler
from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler
from diffusion.lcm_scheduler import LCMScheduler
from torchvision.utils import save_image
from accelerate import FullyShardedDataParallelPlugin
from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
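These imports bring in `deepcopy` and `torch.nn`, which the training script below uses to hold an EMA target copy and a frozen teacher copy of the student model. A minimal sketch of that EMA pattern follows; `TinyNet` is a hypothetical stand-in module, and the update mirrors the script's `ema_update` helper.

```python
# Hedged sketch of the EMA pattern used in the script: deepcopy a frozen copy,
# then blend parameters after each optimizer step. TinyNet is a hypothetical
# stand-in, not part of the repository.
from copy import deepcopy
import torch
import torch.nn as nn

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(8, 8)

    def forward(self, x):
        return self.proj(x)

def ema_update(model_dest: nn.Module, model_src: nn.Module, rate: float):
    # dest <- rate * dest + (1 - rate) * src, parameter by parameter
    src_params = dict(model_src.named_parameters())
    for name, p_dest in model_dest.named_parameters():
        p_src = src_params[name]
        assert p_src is not p_dest
        p_dest.data.mul_(rate).add_((1 - rate) * p_src.data)

model = TinyNet()                      # online "student"
model_ema = deepcopy(model).eval()     # slow-moving target, as in the script
for p in model_ema.parameters():
    p.requires_grad_(False)

# pretend one training step changed the student's weights
with torch.no_grad():
    for p in model.parameters():
        p.add_(0.01 * torch.randn_like(p))

ema_update(model_ema, model, rate=0.95)
```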
16410
args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=True, resume_lr_scheduler=True) if args.debug: config.log_interval = 1 config.train_batch_size = 11 config.valid_num = 100 config.load_from = None os.umask(0o000) os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=5400) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches=False, accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with="tensorboard", project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [256, 512] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, snr=config.snr_loss, return_startx=True) model = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") if config.load_from is not None: if args.load_from is not None: config.load_from = args.load_from missing, unexpected = load_checkpoint(config.load_from, model, load_ema=config.get('load_ema', False)) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') model_ema = deepcopy(model).eval() model_teacher = deepcopy(model).eval() if not config.data.load_vae_feat: vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda() # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type) if 
config.multi_scale: batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, drop_last=True, ratio_nums=dataset.ratio_nums, config=config, valid_num=config.valid_num) # used for balanced sampling # batch_sampler = BalancedAspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, # batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, # ratio_nums=dataset.ratio_nums) train_dataloader = build_dataloader(dataset, batch_sampler=batch_sampler, num_workers=config.num_workers) else: train_dataloader = build_dataloader(dataset, num_workers=config.num_workers, batch_size=config.train_batch_size, shuffle=True) # build optimizer and lr scheduler lr_scale_ratio = 1 if config.get('auto_lr', None):
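The cropped code stops right before the learning-rate auto-scaling branch. Per the `auto_scale_lr` snippet in the context above, the configured lr is multiplied by `effective_bs / base_batch_size` (linear rule) or its square root. A standalone sketch with purely illustrative numbers:

```python
# Hedged sketch of the lr auto-scaling rule described by the auto_scale_lr
# snippet above; the batch size and lr values here are illustrative only.
import math

def auto_scale_lr(effective_bs, lr, rule="linear", base_batch_size=256):
    assert rule in ("linear", "sqrt")
    scale = (effective_bs / base_batch_size if rule == "linear"
             else math.sqrt(effective_bs / base_batch_size))
    return lr * scale, scale

# e.g. 64 GPUs x per-GPU batch 32 = 2048 effective batch size
scaled_lr, ratio = auto_scale_lr(2048, lr=2e-5, rule="linear")
print(f"scale_ratio={ratio:.2f} scaled_lr={scaled_lr:.2e}")   # 8.00, 1.60e-04
```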
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def ema_update(model_dest: nn.Module, model_src: nn.Module, rate): param_dict_src = dict(model_src.named_parameters()) for p_name, p_dest in model_dest.named_parameters(): p_src = param_dict_src[p_name] assert p_src is not p_dest p_dest.data.mul_(rate).add_((1 - rate) * p_src.data) def append_dims(x, target_dims): """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" dims_to_append = target_dims - x.ndim if dims_to_append < 0: raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less") return x[(...,) + (None,) * dims_to_append] # From LCMScheduler.get_scalings_for_boundary_condition_discrete def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0): c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2) c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5 return c_skip, c_out def extract_into_tensor(a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) class DDIMSolver: def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50): # DDIM sampling parameters step_ratio = timesteps // ddim_timesteps self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1 self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps] self.ddim_alpha_cumprods_prev = np.asarray( [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist() ) # convert to torch tensors self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long() self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods) self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev) def to(self, device): self.ddim_timesteps = self.ddim_timesteps.to(device) self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device) self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device) return self def ddim_step(self, pred_x0, pred_noise, timestep_index): alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape) dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt return x_prev @torch.no_grad() def log_validation(model, step, device): if hasattr(model, 'module'): model = model.module scheduler = LCMScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear", prediction_type="epsilon") scheduler.set_timesteps(4, 50) infer_timesteps = scheduler.timesteps dog_embed = torch.load('data/tmp/dog.pth', map_location='cpu') caption_embs, emb_masks = dog_embed['dog_text'].to(device), dog_embed['dog_mask'].to(device) hw = torch.tensor([[1024, 1024]], dtype=torch.float, device=device).repeat(1, 1) ar = torch.tensor([[1.]], device=device).repeat(1, 1) # Create sampling noise: infer_latents = torch.randn(1, 4, 1024, 1024, device=device) model_kwargs = dict(data_info={'img_hw': hw, 'aspect_ratio': ar}, mask=emb_masks) logger.info("Running validation... ") # 7. 
LCM MultiStep Sampling Loop: for i, t in tqdm(list(enumerate(infer_timesteps))): ts = torch.full((1,), t, device=device, dtype=torch.long) # model prediction (v-prediction, eps, x) model_pred = model(infer_latents, ts, caption_embs, **model_kwargs)[:, :4] # compute the previous noisy sample x_t -> x_t-1 infer_latents, denoised = scheduler.step(model_pred, i, t, infer_latents, return_dict=False) samples = vae.decode(denoised / 0.18215).sample torch.cuda.empty_cache() save_image(samples[0], f'output_cv/vis/{step}.jpg', nrow=1, normalize=True, value_range=(-1, 1)) def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) # Create uncond embeds for classifier free guidance uncond_prompt_embeds = model.module.y_embedder.y_embedding.repeat(config.train_batch_size, 1, 1, 1) # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start= time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start if load_vae_feat: z = batch[0] else: with torch.no_grad(): with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'): posterior = vae.encode(batch[0]).latent_dist if config.sample_posterior: z = posterior.sample() else: z = posterior.mode() latents = z * config.scale_factor y = batch[1] y_mask = batch[2] data_info = batch[3] # Sample a random timestep for each image grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias. topk = config.train_sampling_steps // config.num_ddim_timesteps index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long() start_timesteps = solver.ddim_timesteps[index] timesteps = start_timesteps - topk timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps) # Get boundary scalings for start_timesteps and (end) timesteps. 
c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps) c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]] c_skip, c_out = scalings_for_boundary_conditions(timesteps) c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]] # Sample a random guidance scale w from U[w_min, w_max] and embed it # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min w = config.cfg_scale * torch.ones((bsz,)) w = w.reshape(bsz, 1, 1, 1) w = w.to(device=latents.device, dtype=latents.dtype) # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k} _, pred_x_0, noisy_model_input = train_diffusion.training_losses(model, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0 # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after # noisy_latents with both the conditioning embedding c and unconditional embedding 0 # Get teacher model prediction on noisy_latents and conditional embedding with torch.no_grad(): with torch.autocast("cuda"): cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise) # Get teacher model prediction on noisy_latents and unconditional embedding uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info), noise=noise) # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation) pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0) pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output) x_prev = solver.ddim_step(pred_x0, pred_noise, index) # Get target LCM prediction on x_prev, w, c, t_n with torch.no_grad(): with torch.autocast("cuda", enabled=True): _, pred_x_0, _ = train_diffusion.training_losses(model_ema, x_prev.float(), timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), skip_noise=True) target = c_skip * x_prev + c_out * pred_x_0 # Calculate loss if config.loss_type == "l2": loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") elif config.loss_type == "huber": loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c) # Backpropagation on the online student model (`model`) accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() optimizer.zero_grad(set_to_none=True) if accelerator.sync_gradients: ema_update(model_ema, model, config.ema_decay) lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch 
[{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) global_step += 1 data_time_start= time.time() synchronize() torch.cuda.empty_cache() if accelerator.is_main_process: # log_validation(model_ema, step, model.device) if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() synchronize() if accelerator.is_main_process: if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), model_ema=accelerator.unwrap_model(model_ema), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume-from', help='the dir to resume the training') parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training') parser.add_argument('--local-rank', type=int, default=-1) parser.add_argument('--local_rank', type=int, default=-1) parser.add_argument('--debug', action='store_true') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=True, resume_lr_scheduler=True) if args.debug: config.log_interval = 1 config.train_batch_size = 11 config.valid_num = 100 config.load_from = None os.umask(0o000) os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=5400) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches=False, accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with="tensorboard", project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = 
get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [256, 512] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, snr=config.snr_loss, return_startx=True) model = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") if config.load_from is not None: if args.load_from is not None: config.load_from = args.load_from missing, unexpected = load_checkpoint(config.load_from, model, load_ema=config.get('load_ema', False)) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') model_ema = deepcopy(model).eval() model_teacher = deepcopy(model).eval() if not config.data.load_vae_feat: vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda() # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type) if config.multi_scale: batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, drop_last=True, ratio_nums=dataset.ratio_nums, config=config, valid_num=config.valid_num) # used for balanced sampling # batch_sampler = BalancedAspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, # batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, # ratio_nums=dataset.ratio_nums) train_dataloader = build_dataloader(dataset, batch_sampler=batch_sampler, num_workers=config.num_workers) else: train_dataloader = build_dataloader(dataset, num_workers=config.num_workers, batch_size=config.train_batch_size, shuffle=True) # build optimizer and lr scheduler lr_scale_ratio = 1 if config.get('auto_lr', None):
lr_scale_ratio = auto_scale_lr(config.train_batch_size * get_world_size() * config.gradient_accumulation_steps,
16
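The "huber" branch of the loss in the training step above is the pseudo-Huber penalty sqrt(r^2 + c^2) - c rather than the classical piecewise Huber loss. A minimal, self-contained sketch follows; the value of c is an assumption for illustration, not the repository's configured huber_c.

import torch

def pseudo_huber(pred: torch.Tensor, target: torch.Tensor, c: float = 1e-3) -> torch.Tensor:
    # sqrt(r^2 + c^2) - c behaves like r^2 / (2c) for small residuals and like |r|
    # for large ones, so it is less sensitive to outliers than plain MSE.
    return torch.mean(torch.sqrt((pred - target) ** 2 + c ** 2) - c)

residual = torch.randn(4, 8)
loss = pseudo_huber(residual, torch.zeros_like(residual))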
2023-10-12 14:16:33+00:00
24k
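The distillation step above relies on scalings_for_boundary_conditions to build the consistency target target = c_skip * x_prev + c_out * pred_x_0, but the helper itself is not shown in the excerpt. The sketch below follows the common LCM-paper / diffusers convention; sigma_data = 0.5 and timestep_scaling = 10.0 are assumed defaults, not values confirmed by this repository.

import torch

def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0):
    # c_skip -> 1 and c_out -> 0 as timestep -> 0, so the distilled model reduces
    # to the identity at t = 0 (the boundary condition of consistency models).
    scaled_t = timestep_scaling * timestep
    c_skip = sigma_data ** 2 / (scaled_t ** 2 + sigma_data ** 2)
    c_out = scaled_t / (scaled_t ** 2 + sigma_data ** 2) ** 0.5
    return c_skip, c_out

t = torch.tensor([0.0, 250.0, 999.0])
c_skip, c_out = scalings_for_boundary_conditions(t)
# c_skip[0] == 1.0 and c_out[0] == 0.0, matching the boundary condition.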
NVlabs/EmerNeRF
datasets/waymo.py
[ { "identifier": "SceneLidarSource", "path": "datasets/base/lidar_source.py", "snippet": "class SceneLidarSource(abc.ABC):\n \"\"\"\n The base class for the lidar source of a scene.\n \"\"\"\n\n data_cfg: OmegaConf = None\n # the normalized timestamps of all points (normalized to [0, 1]), shape: (num_points,)\n _normalized_timestamps: Tensor = None\n # the timestamps of all points, shape: (num_points,)\n _timestamps: Tensor = None\n # the timesteps of all points, shape: (num_points,)\n # - the difference between timestamps and timesteps is that\n # timestamps are the actual timestamps (minus 1e9) of lidar scans,\n # while timesteps are the integer timestep indices of lidar scans.\n _timesteps: Tensor = None\n # origin of each lidar point, shape: (num_points, 3)\n origins: Tensor = None\n # unit direction of each lidar point, shape: (num_points, 3)\n directions: Tensor = None\n # range of each lidar point, shape: (num_points,)\n ranges: Tensor = None\n # the transformation matrices from lidar to world coordinate system,\n lidar_to_worlds: Tensor = None\n # the indices of the lidar scans that are cached\n cached_indices: Tensor = None\n cached_origins: Tensor = None\n cached_directions: Tensor = None\n cached_ranges: Tensor = None\n cached_normalized_timestamps: Tensor = None\n\n def __init__(\n self,\n lidar_data_config: OmegaConf,\n device: torch.device = torch.device(\"cpu\"),\n ) -> None:\n # hold the config of the lidar data\n self.data_cfg = lidar_data_config\n self.device = device\n\n @abc.abstractmethod\n def create_all_filelist(self) -> None:\n \"\"\"\n Create a list of all the files in the dataset.\n e.g., a list of all the lidar scans in the dataset.\n \"\"\"\n raise NotImplementedError\n\n def load_data(self):\n self.load_calibrations()\n self.load_lidar()\n logger.info(\"[Lidar] All Lidar Data loaded.\")\n\n def to(self, device: torch.device) -> \"SceneLidarSource\":\n \"\"\"\n Move the dataset to the given device.\n Args:\n device: the device to move the dataset to.\n \"\"\"\n self.device = device\n if self.origins is not None:\n self.origins = self.origins.to(device)\n if self.directions is not None:\n self.directions = self.directions.to(device)\n if self.ranges is not None:\n self.ranges = self.ranges.to(device)\n if self._timestamps is not None:\n self._timestamps = self._timestamps.to(device)\n if self._timesteps is not None:\n self._timesteps = self._timesteps.to(device)\n if self._normalized_timestamps is not None:\n self._normalized_timestamps = self._normalized_timestamps.to(device)\n if self.lidar_to_worlds is not None:\n self.lidar_to_worlds = self.lidar_to_worlds.to(device)\n return self\n\n @abc.abstractmethod\n def load_calibrations(self) -> None:\n \"\"\"\n Load the calibration files of the dataset.\n e.g., lidar to world transformation matrices.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def load_lidar(self) -> None:\n \"\"\"\n Load the lidar data of the dataset from the filelist.\n \"\"\"\n raise NotImplementedError\n\n def get_aabb(self) -> Tensor:\n \"\"\"\n Returns:\n aabb_min, aabb_max: the min and max of the axis-aligned bounding box of the scene\n Note:\n we assume the lidar points are already in the world coordinate system\n we first downsample the lidar points, then compute the aabb by taking the\n given percentiles of the lidar coordinates in each dimension.\n \"\"\"\n assert (\n self.origins is not None\n and self.directions is not None\n and self.ranges is not None\n ), \"Lidar points not loaded, cannot compute aabb.\"\n 
logger.info(\"[Lidar] Computing auto AABB based on downsampled lidar points....\")\n\n lidar_pts = self.origins + self.directions * self.ranges\n\n # downsample the lidar points by uniformly sampling a subset of them\n lidar_pts = lidar_pts[\n torch.randperm(len(lidar_pts))[\n : int(len(lidar_pts) / self.data_cfg.lidar_downsample_factor)\n ]\n ]\n # compute the aabb by taking the given percentiles of the lidar coordinates in each dimension\n aabb_min = torch.quantile(lidar_pts, self.data_cfg.lidar_percentile, dim=0)\n aabb_max = torch.quantile(lidar_pts, 1 - self.data_cfg.lidar_percentile, dim=0)\n del lidar_pts\n torch.cuda.empty_cache()\n\n # usually the lidar's height is very small, so we slightly increase the height of the aabb\n if aabb_max[-1] < 20:\n aabb_max[-1] = 20.0\n aabb = torch.tensor([*aabb_min, *aabb_max])\n logger.info(f\"[Lidar] Auto AABB from LiDAR: {aabb}\")\n return aabb\n\n @property\n def num_timesteps(self) -> int:\n \"\"\"\n Returns:\n the number of lidar timestamps in the dataset,\n usually the number of captured lidar scans.\n \"\"\"\n return len(self.timesteps.unique())\n\n @property\n def timesteps(self) -> Tensor:\n \"\"\"\n Returns:\n the integer timestep indices of each lidar timestamp,\n shape: (num_lidar_points,)\n Note:\n the difference between timestamps and timesteps is that\n timestamps are the actual timestamps (minus 1e9) of the lidar scans,\n while timesteps are the integer timestep indices of the lidar scans.\n \"\"\"\n return self._timesteps\n\n @property\n def timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the actual timestamps (minus 1e9) of the lidar scans.\n shape: (num_lidar_points,)\n \"\"\"\n return self._timestamps\n\n @property\n def normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the normalized timestamps of the lidar scans\n (normalized to the range [0, 1]).\n shape: (num_lidar_points,)\n \"\"\"\n return self._normalized_timestamps\n\n @property\n def unique_normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the unique normalized timestamps of the lidar scans\n (normalized to the range [0, 1]).\n shape: (num_timesteps,)\n \"\"\"\n return self._unique_normalized_timestamps\n\n def register_normalized_timestamps(self, normalized_timestamps: Tensor) -> None:\n \"\"\"\n Register the normalized timestamps of the lidar scans.\n Args:\n normalized_timestamps: the normalized timestamps of the lidar scans\n (normalized to the range [0, 1]).\n shape: (num_lidar_points,)\n Note:\n we normalize the lidar timestamps together with the image timestamps,\n so that the both the lidar and image timestamps are in the range [0, 1].\n \"\"\"\n assert normalized_timestamps.size(0) == self.origins.size(\n 0\n ), \"The number of lidar points and the number of normalized timestamps must match.\"\n assert (\n normalized_timestamps.min() >= 0 and normalized_timestamps.max() <= 1\n ), \"The normalized timestamps must be in the range [0, 1].\"\n self._normalized_timestamps = normalized_timestamps.to(self.device)\n self._unique_normalized_timestamps = self._normalized_timestamps.unique()\n\n def find_closest_timestep(self, normed_timestamp: float) -> int:\n \"\"\"\n Find the closest timestep to the given timestamp.\n Args:\n normed_timestamp: the normalized timestamp to find the closest timestep for.\n Returns:\n the closest timestep to the given timestamp.\n \"\"\"\n return torch.argmin(\n torch.abs(self.unique_normalized_timestamps - normed_timestamp)\n )\n\n def sample_uniform_rays(\n self,\n num_rays: int,\n candidate_indices: Tensor = 
None,\n ) -> Tensor:\n \"\"\"\n Sample a batch of rays uniformly from the dataset.\n Args:\n num_rays: the number of rays to sample.\n candidate_indices: the indices of the lidar scans to sample from.\n If None, sample from all the lidar scans.\n If not None, sample from the given lidar scans.\n Returns:\n lidar_idx: the indices of the sampled lidar points.\n shape: (num_rays,)\n \"\"\"\n if candidate_indices is None:\n return torch.randint(\n 0, len(self.origins), size=(num_rays,), device=self.device\n )\n else:\n if not isinstance(candidate_indices, Tensor):\n candidate_indices = torch.tensor(candidate_indices, device=self.device)\n if self.cached_indices is None:\n self.cached_indices = candidate_indices\n mask = self.timesteps.new_zeros(\n self.timesteps.size(0), dtype=torch.bool\n ) # Create a mask of False\n for index in self.cached_indices:\n mask |= (\n self.timesteps == index\n ) # Set mask values to True where timesteps match an index\n self.cached_origins = self.origins[mask]\n self.cached_directions = self.directions[mask]\n self.cached_ranges = self.ranges[mask]\n self.cached_normalized_timestamps = self.normalized_timestamps[mask]\n if not torch.equal(candidate_indices, self.cached_indices):\n print(\"Recomputing cached indices\")\n self.cached_indices = candidate_indices\n mask = self.timesteps.new_zeros(\n self.timesteps.size(0), dtype=torch.bool\n ) # Create a mask of False\n for index in self.cached_indices:\n mask |= (\n self.timesteps == index\n ) # Set mask values to True where timesteps match an index\n self.cached_origins = self.origins[mask]\n self.cached_directions = self.directions[mask]\n self.cached_ranges = self.ranges[mask]\n self.cached_normalized_timestamps = self.normalized_timestamps[mask]\n random_idx = torch.randint(\n 0,\n len(self.cached_origins),\n size=(num_rays,),\n device=self.device,\n )\n return random_idx\n\n def get_train_rays(\n self,\n num_rays: int,\n candidate_indices: Tensor = None,\n ) -> Dict[str, Tensor]:\n \"\"\"\n Get a batch of rays for training.\n Args:\n num_rays: the number of rays to sample.\n candidate_indices: the indices of the lidar scans to sample from.\n If None, sample from all the lidar scans.\n If not None, sample from the given lidar scans.\n Returns:\n a dict of the sampled rays.\n \"\"\"\n lidar_idx = self.sample_uniform_rays(\n num_rays=num_rays, candidate_indices=candidate_indices\n )\n origins = self.cached_origins[lidar_idx]\n directions = self.cached_directions[lidar_idx]\n ranges = self.cached_ranges[lidar_idx]\n normalized_timestamps = self.cached_normalized_timestamps[lidar_idx]\n return {\n \"lidar_origins\": origins,\n \"lidar_viewdirs\": directions,\n \"lidar_ranges\": ranges,\n \"lidar_normed_timestamps\": normalized_timestamps,\n }\n\n def get_render_rays(self, time_idx: int) -> Dict[str, Tensor]:\n \"\"\"\n Get the of rays for rendering at the given timestep.\n Args:\n time_idx: the index of the lidar scan to render.\n Returns:\n a dict of the sampled rays.\n \"\"\"\n origins = self.origins[self.timesteps == time_idx]\n directions = self.directions[self.timesteps == time_idx]\n ranges = self.ranges[self.timesteps == time_idx]\n normalized_timestamps = self.normalized_timestamps[self.timesteps == time_idx]\n return {\n \"lidar_origins\": origins,\n \"lidar_viewdirs\": directions,\n \"lidar_ranges\": ranges,\n \"lidar_normed_timestamps\": normalized_timestamps,\n }" }, { "identifier": "ScenePixelSource", "path": "datasets/base/pixel_source.py", "snippet": "class ScenePixelSource(abc.ABC):\n \"\"\"\n The 
base class for all pixel sources of a scene.\n \"\"\"\n\n # the original size of the images in the dataset\n # these values are from the waymo dataset as a placeholder\n ORIGINAL_SIZE = [[1280, 1920], [1280, 1920], [1280, 1920], [884, 1920], [884, 1920]]\n\n # define a transformation matrix to convert the opencv camera coordinate system to the dataset camera coordinate system\n OPENCV2DATASET = np.array(\n [[0, 0, 1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]]\n )\n data_cfg: OmegaConf = None\n # the normalized timestamps of all images (normalized to [0, 1]), shape: (num_imgs,)\n _normalized_timestamps: Tensor = None\n # the timestamps of all images, shape: (num_imgs,)\n _timestamps: Tensor = None\n # the timesteps of all images, shape: (num_imgs,)\n # - the difference between timestamps and timesteps is that\n # timestamps are the actual timestamps (minus 1e9) of images\n # while timesteps are the integer timestep indices of images.\n _timesteps: Tensor = None\n # camera ids of all images, shape: (num_imgs,)\n cam_ids: Tensor = None\n # camera-to-world matrices of all images, shape: (num_imgs, 4, 4)\n cam_to_worlds: Tensor = None\n # camera intrinsic matrices of all images, shape: (num_imgs, 3, 3)\n intrinsics: Tensor = None\n # all image tensors, shape: (num_imgs, load_size[0], load_size[1], 3)\n images: Tensor = None\n # the image ids of all images, shape: (num_imgs,)\n img_ids: Tensor = None\n # the binary masks of dynamic objects, shape: (num_imgs, load_size[0], load_size[1])\n dynamic_masks: Tensor = None\n # the binary masks of sky regions, shape: (num_imgs, load_size[0], load_size[1])\n sky_masks: Tensor = None\n # the feature tensors, shape: (num_imgs, num_patches_h, num_patches_w, C)\n features: Tensor = None\n # the pca matrix used to reduce the feature dimension to target_feature_dim,\n # shape: (original_feature_dim, target_feature_dim)\n reduce_to_target_dim_mat: Tensor = None\n # the min and max values of the reduced features used for normalization,\n # shape: (target_feature_dim,)\n feat_min: Tensor = None\n feat_max: Tensor = None\n\n # the pca matrix used to reduce the feature dimension for visualization,\n # shape: (target_feature_dim, 3)\n feat_dimension_reduction_mat: Tensor = None\n # the min and max values of the original features used for visualization,\n # shape: (3,)\n feat_color_min: Tensor = None\n feat_color_max: Tensor = None\n # the downscale factor of the features, shape: (2,)\n featmap_downscale_factor: Tuple[float, float] = None\n\n # importance sampling weights of all images,\n # shape: (num_imgs, load_size[0] // buffer_scale, load_size[1] // buffer_scale)\n pixel_error_maps: Tensor = None\n pixel_error_buffered: bool = False\n\n def __init__(\n self, pixel_data_config: OmegaConf, device: torch.device = torch.device(\"cpu\")\n ) -> None:\n # hold the config of the pixel data\n self.data_cfg = pixel_data_config\n self.device = device\n self._downscale_factor = 1 / pixel_data_config.downscale\n self._old_downscale_factor = 1 / pixel_data_config.downscale\n\n @abc.abstractmethod\n def create_all_filelist(self) -> None:\n \"\"\"\n Create file lists for all data files.\n e.g., img files, feature files, etc.\n \"\"\"\n self.img_filepaths = []\n self.feat_filepaths = []\n self.sky_mask_filepaths = []\n self.dynamic_mask_filepaths = []\n raise NotImplementedError\n\n @abc.abstractmethod\n def load_calibrations(self) -> None:\n \"\"\"\n Load the camera intrinsics, extrinsics, timestamps, etc.\n Compute the camera-to-world matrices, ego-to-world matrices, 
etc.\n \"\"\"\n raise NotImplementedError\n\n def load_data(self) -> None:\n \"\"\"\n A general function to load all data.\n \"\"\"\n self.load_calibrations()\n self.load_rgb()\n self.load_dynamic_mask()\n self.load_sky_mask()\n self.load_features()\n # build the pixel error buffer\n self.build_pixel_error_buffer()\n logger.info(\"[Pixel] All Pixel Data loaded.\")\n\n def to(self, device: torch.device) -> \"ScenePixelSource\":\n \"\"\"\n Move the dataset to the given device.\n Args:\n device: the device to move the dataset to.\n \"\"\"\n self.device = device\n if self.images is not None:\n self.images = self.images.to(device)\n if self.dynamic_masks is not None:\n self.dynamic_masks = self.dynamic_masks.to(device)\n if self.sky_masks is not None:\n self.sky_masks = self.sky_masks.to(device)\n if self.features is not None:\n # this step can be dangerous because the features are huge\n # TODO: add a flag to control whether to move the features to GPU\n self.features = self.features.to(device)\n if self.reduce_to_target_dim_mat is not None:\n self.reduce_to_target_dim_mat = self.reduce_to_target_dim_mat.to(\n self.device\n )\n if self.feat_min is not None:\n self.feat_min = self.feat_min.to(self.device)\n self.feat_max = self.feat_max.to(self.device)\n if self.feat_dimension_reduction_mat is not None:\n self.feat_dimension_reduction_mat = (\n self.feat_dimension_reduction_mat.to(self.device)\n )\n self.feat_color_min = self.feat_color_min.to(self.device)\n self.feat_color_max = self.feat_color_max.to(self.device)\n if self.cam_to_worlds is not None:\n self.cam_to_worlds = self.cam_to_worlds.to(device)\n if self.intrinsics is not None:\n self.intrinsics = self.intrinsics.to(device)\n if self.cam_ids is not None:\n self.cam_ids = self.cam_ids.to(device)\n if self._timestamps is not None:\n self._timestamps = self._timestamps.to(device)\n if self._timesteps is not None:\n self._timesteps = self._timesteps.to(device)\n if self._normalized_timestamps is not None:\n self._normalized_timestamps = self._normalized_timestamps.to(device)\n if self.pixel_error_maps is not None:\n self.pixel_error_maps = self.pixel_error_maps.to(device)\n return self\n\n def load_rgb(self) -> None:\n \"\"\"\n Load the RGB images if they are available. 
We cache the images in memory for faster loading.\n Note this can be memory consuming.\n \"\"\"\n if not self.data_cfg.load_rgb:\n return\n images = []\n for fname in tqdm(\n self.img_filepaths, desc=\"Loading images\", dynamic_ncols=True\n ):\n rgb = Image.open(fname).convert(\"RGB\")\n # resize them to the load_size\n rgb = rgb.resize(\n (self.data_cfg.load_size[1], self.data_cfg.load_size[0]), Image.BILINEAR\n )\n images.append(rgb)\n # normalize the images to [0, 1]\n self.images = torch.from_numpy(np.stack(images, axis=0)) / 255\n self.img_ids = torch.arange(len(self.images)).long()\n\n def load_dynamic_mask(self) -> None:\n \"\"\"\n Load the dynamic masks if they are available.\n \"\"\"\n if not self.data_cfg.load_dynamic_mask:\n return\n dynamic_masks = []\n for fname in tqdm(\n self.dynamic_mask_filepaths,\n desc=\"Loading dynamic masks\",\n dynamic_ncols=True,\n ):\n dyn_mask = Image.open(fname).convert(\"L\")\n # resize them to the load_size\n dyn_mask = dyn_mask.resize(\n (self.data_cfg.load_size[1], self.data_cfg.load_size[0]), Image.BILINEAR\n )\n dynamic_masks.append(np.array(dyn_mask) > 0)\n self.dynamic_masks = torch.from_numpy(np.stack(dynamic_masks, axis=0)).float()\n\n def load_sky_mask(self) -> None:\n \"\"\"\n Load the sky masks if they are available.\n \"\"\"\n if not self.data_cfg.load_sky_mask:\n return\n sky_masks = []\n for fname in tqdm(\n self.sky_mask_filepaths, desc=\"Loading sky masks\", dynamic_ncols=True\n ):\n sky_mask = Image.open(fname).convert(\"L\")\n # resize them to the load_size\n sky_mask = sky_mask.resize(\n (self.data_cfg.load_size[1], self.data_cfg.load_size[0]), Image.NEAREST\n )\n sky_masks.append(np.array(sky_mask) > 0)\n self.sky_masks = torch.from_numpy(np.stack(sky_masks, axis=0)).float()\n\n def load_features(self) -> None:\n \"\"\"\n Load the features if they are available.\n \"\"\"\n if not self.data_cfg.load_features:\n return\n\n if not self.data_cfg.skip_feature_extraction:\n logger.info(f\"Extracting {self.data_cfg.feature_model_type}...\")\n return_dict = extract_and_save_features(\n input_img_path_list=self.img_filepaths,\n saved_feat_path_list=self.feat_filepaths,\n img_shape=self.data_cfg.feature_extraction_size,\n stride=self.data_cfg.feature_extraction_stride,\n model_type=self.data_cfg.feature_model_type,\n )\n\n features = []\n for fname in tqdm(\n self.feat_filepaths, desc=\"Loading features\", dynamic_ncols=True\n ):\n # mmap_mode=\"r\" is to avoid memory overflow when loading features\n # but it only slightly helps... do we have a better way to load features?\n feature = np.load(fname, mmap_mode=\"r\").squeeze()\n features.append(feature)\n # shape: (num_imgs, num_patches_h, num_patches_w, C)\n self.features = torch.from_numpy(np.stack(features, axis=0)).float()\n # featmap_downscale_factor is used to convert the image coordinates to ViT feature coordinates.\n # resizing ViT features to (H, W) using bilinear interpolation is infeasible.\n # imagine a feature array of shape (num_timesteps x num_cams, 640, 960, 768). 
it's too large to fit in GPU memory.\n self.featmap_downscale_factor = (\n self.features.shape[1] / self.data_cfg.load_size[0],\n self.features.shape[2] / self.data_cfg.load_size[1],\n )\n logger.info(\n f\"Loaded {self.features.shape} {self.data_cfg.feature_model_type} features.\"\n )\n logger.info(f\"Feature scale: {self.featmap_downscale_factor}\")\n logger.info(f\"Computing features PCA...\")\n # compute feature visualization matrix\n C = self.features.shape[-1]\n # no need to compute PCA on the entire set of features, we randomly sample 100k features\n temp_feats = self.features.reshape(-1, C)\n max_elements_to_compute_pca = min(100000, temp_feats.shape[0])\n selected_features = temp_feats[\n np.random.choice(\n temp_feats.shape[0], max_elements_to_compute_pca, replace=False\n )\n ]\n if self.data_cfg.target_feature_dim is not None:\n logger.info(\n f\"Reducing features to {self.data_cfg.target_feature_dim} dimensions.\"\n )\n # compute PCA to reduce the feature dimension to target_feature_dim\n U, S, reduce_to_target_dim_mat = torch.pca_lowrank(\n selected_features, q=self.data_cfg.target_feature_dim, niter=20\n )\n # compute the fraction of variance explained by target_feature_dim\n variances = S**2\n fraction_var_explained = variances / variances.sum()\n logger.info(f\"[PCA] fraction_var_explained: \\n{fraction_var_explained}\")\n logger.info(\n f\"[PCA] fraction_var_explained sum: {fraction_var_explained.sum()}\",\n )\n self.reduce_to_target_dim_mat = reduce_to_target_dim_mat\n\n # reduce the features to target_feature_dim\n selected_features = selected_features @ reduce_to_target_dim_mat\n self.features = self.features @ reduce_to_target_dim_mat\n C = self.features.shape[-1]\n\n # normalize the reduced features to [0, 1] along each dimension\n feat_min = self.features.reshape(-1, C).min(dim=0)[0]\n feat_max = self.features.reshape(-1, C).max(dim=0)[0]\n self.features = (self.features - feat_min) / (feat_max - feat_min)\n selected_features = (selected_features - feat_min) / (feat_max - feat_min)\n self.feat_min = feat_min.to(self.device)\n self.feat_max = feat_max.to(self.device)\n self.reduce_to_target_dim_mat = reduce_to_target_dim_mat.to(self.device)\n # we compute the first 3 principal components of the ViT features as the color\n reduction_mat, feat_color_min, feat_color_max = get_robust_pca(\n selected_features\n )\n # final features are of shape (num_imgs, num_patches_h, num_patches_w, target_feature_dim)\n self.features = self.features\n\n # save visualization parameters\n self.feat_dimension_reduction_mat = reduction_mat\n self.feat_color_min = feat_color_min\n self.feat_color_max = feat_color_max\n del temp_feats, selected_features\n\n logger.info(\n f\"Feature PCA computed, shape: {self.feat_dimension_reduction_mat.shape}\"\n )\n\n def delete_features(self) -> None:\n \"\"\"\n Delete the features if they exist.\n This is to save disk space. 2D features of a single sequence can be 30GB+.\n \"\"\"\n delete_features(self.feat_filepaths)\n\n def get_aabb(self) -> Tensor:\n \"\"\"\n Returns:\n aabb_min, aabb_max: the min and max of the axis-aligned bounding box of the scene\n Note:\n We compute the coarse aabb by using the front camera positions / trajectories. 
We then\n extend this aabb by 40 meters along horizontal directions and 20 meters up and 5 meters\n down along vertical directions.\n \"\"\"\n assert (\n self.cam_to_worlds is not None\n ), \"Camera poses not loaded, cannot compute aabb.\"\n logger.info(\"[Pixel] Computing auto AABB based on front camera trajectory....\")\n if self.num_cams == 1:\n # if there is only one camera, it's front camera\n front_cameras_positions = self.cam_to_worlds[:, :3, 3]\n elif self.num_cams == 3:\n # if there are three cameras, they are ordered as front_left, front, front_right\n front_cameras_positions = self.cam_to_worlds[1::3, :3, 3]\n elif self.num_cams == 5:\n # if there are five cameras, they are ordered as side_left, front_left, front, front_right, side_right\n front_cameras_positions = self.cam_to_worlds[2::5, :3, 3]\n elif self.num_cams == 6:\n # if there are six cameras, they are ordered as front_left, front, front_right, back_left, back, back_right\n front_cameras_positions = self.cam_to_worlds[2::6, :3, 3]\n\n # compute the aabb\n aabb_min = front_cameras_positions.min(dim=0)[0]\n aabb_max = front_cameras_positions.max(dim=0)[0]\n\n # extend aabb by 40 meters along forward direction and 40 meters along the left/right direction\n # aabb direction: x, y, z: front, left, up\n aabb_max[0] += 40\n aabb_max[1] += 40\n # when the car is driving uphills\n aabb_max[2] = min(aabb_max[2] + 20, 20)\n\n # for waymo, there will be a lot of waste of space because we don't have images in the back,\n # it's more reasonable to extend the aabb only by a small amount, e.g., 5 meters\n # we use 40 meters here for a more general case\n aabb_min[0] -= 40\n aabb_min[1] -= 40\n # when a car is driving downhills\n aabb_min[2] = max(aabb_min[2] - 5, -5)\n aabb = torch.tensor([*aabb_min, *aabb_max])\n logger.info(f\"[Pixel] Auto AABB from camera: {aabb}\")\n return aabb\n\n def get_features(\n self,\n img_id,\n y: Tensor,\n x: Tensor,\n downscale: Union[float, Tuple[float, float]] = 1.0,\n ) -> Tensor:\n \"\"\"\n Get the features at the given pixel coordinates.\n Args:\n img_id: the image index.\n y: the vertical coordinates of the pixels, shape: (num_rays,)\n x: the horizontal coordinates of the pixels, shape: (num_rays,)\n downscale: the downscale factor of the features.\n If it's a float, we use the same downscale factor for both height and width.\n If it's a tuple, we use the first value as the downscale factor for height\n and the second value as the downscale factor for width.\n Returns:\n features: the features at the given pixel coordinates.\n shape: (num_rays, feat_dim)\n \"\"\"\n if isinstance(downscale, float):\n downscale = (downscale, downscale)\n # we compute the nearest DINO feature for each pixel\n # map (x, y) in the (W, H) space to (x * dino_scale[0], y * dino_scale[1]) in the (W//patch_size, H//patch_size) space\n dino_y = (y * downscale[0]).long()\n dino_x = (x * downscale[1]).long()\n # dino_feats are in CPU memory (because they are huge), so we need to move them to GPU\n dino_feat = self.features[img_id, dino_y.cpu(), dino_x.cpu()]\n return dino_feat\n\n def build_pixel_error_buffer(self) -> None:\n \"\"\"\n Build the pixel error buffer.\n \"\"\"\n if self.buffer_ratio > 0:\n # shape: (num_imgs, H // buffer_downscale, W // buffer_downscale)\n self.pixel_error_maps = torch.ones(\n (\n len(self.cam_to_worlds),\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n ),\n dtype=torch.float32,\n device=self.device,\n )\n logger.info(\n f\"Successfully built pixel error buffer 
(log2(num_pixels) = {np.log2(len(self.pixel_error_maps.reshape(-1))):.2f}).\"\n )\n else:\n logger.info(\"Not building pixel error buffer because buffer_ratio <= 0.\")\n\n def update_pixel_error_maps(self, render_results: Dict[str, Tensor]) -> None:\n \"\"\"\n Update the pixel error buffer with the given render results.\n \"\"\"\n if self.pixel_error_maps is None:\n logger.info(\"Skipping pixel error buffer update because it's not built.\")\n return\n gt_rgbs = render_results[\"gt_rgbs\"]\n pred_rgbs = render_results[\"rgbs\"]\n gt_rgbs = torch.from_numpy(np.stack(gt_rgbs, axis=0))\n pred_rgbs = torch.from_numpy(np.stack(pred_rgbs, axis=0))\n pixel_error_maps = torch.abs(gt_rgbs - pred_rgbs).mean(dim=-1)\n assert pixel_error_maps.shape == self.pixel_error_maps.shape\n if \"dynamic_opacities\" in render_results:\n if len(render_results[\"dynamic_opacities\"]) > 0:\n dynamic_opacity = render_results[\"dynamic_opacities\"]\n dynamic_opacity = torch.from_numpy(np.stack(dynamic_opacity, axis=0))\n # we prioritize the dynamic objects by multiplying the error by 5\n pixel_error_maps[dynamic_opacity > 0.1] *= 5\n # update the pixel error buffer\n self.pixel_error_maps: Tensor = pixel_error_maps.to(self.device)\n # normalize the pixel error buffer to [0, 1]\n self.pixel_error_maps = (\n self.pixel_error_maps - self.pixel_error_maps.min()\n ) / (self.pixel_error_maps.max() - self.pixel_error_maps.min())\n self.pixel_error_buffered = True\n logger.info(\"Successfully updated pixel error buffer\")\n\n def visualize_pixel_sample_weights(self, indices: List[int]) -> np.ndarray:\n \"\"\"\n Visualize the pixel sample weights.\n Args:\n indices: the image indices to visualize.\n Returns:\n frames: the pixel sample weights of the given image.\n shape: (len(indices) // cams, H, num_cams * W, 3)\n \"\"\"\n frames = (\n self.pixel_error_maps.detach()\n .cpu()\n .numpy()\n .reshape(\n self.num_imgs,\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n )[indices]\n )\n frames = [np.stack([frame, frame, frame], axis=-1) for frame in frames]\n return np.uint8(np.concatenate(frames, axis=1) * 255)\n\n def get_pixel_sample_weights_video(self) -> List[np.ndarray]:\n \"\"\"\n Get the pixel sample weights video.\n Returns:\n frames: the pixel sample weights video.\n shape: (num_imgs // cams, H, num_cams * W, 3)\n \"\"\"\n assert self.buffer_ratio > 0, \"buffer_ratio must be > 0\"\n maps = []\n loss_maps = (\n self.pixel_error_maps.detach()\n .cpu()\n .numpy()\n .reshape(\n self.num_imgs,\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n )\n )\n for i in range(self.num_imgs):\n maps.append(loss_maps[i])\n return maps\n\n def sample_important_rays(\n self, num_rays, img_candidate_indices: Tensor = None\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"\n Sample rays coordinates from the given images based on the pixel error buffer.\n Args:\n num_rays: the number of rays to sample.\n img_candidate_indices: the indices of the images to sample from.\n If None, sample from all the images.\n If not None, sample from the given images only.\n Returns:\n img_id: the image indices of the sampled rays.\n shape: (num_rays,)\n y: the vertical coordinates of the sampled rays.\n shape: (num_rays,)\n x: the horizontal coordinates of the sampled rays.\n shape: (num_rays,)\n \"\"\"\n assert self.pixel_error_buffered, \"Pixel error buffer not built.\"\n # if img_candidate_indices is None, use all image indices\n if img_candidate_indices is None:\n img_candidate_indices = 
torch.arange(len(self.images)).to(self.device)\n if not isinstance(img_candidate_indices, Tensor):\n img_candidate_indices = torch.tensor(img_candidate_indices).to(self.device)\n sampled_indices = torch.multinomial(\n self.pixel_error_maps[img_candidate_indices].reshape(-1),\n num_rays,\n replacement=False,\n )\n # convert the sampled 1d indices to (img_idx, y, x)\n img_idx, y, x = idx_to_3d(\n sampled_indices,\n self.HEIGHT // self.buffer_downscale,\n self.WIDTH // self.buffer_downscale,\n )\n img_idx = img_candidate_indices[img_idx]\n\n # Upscale to the original resolution\n y, x = (y * self.buffer_downscale).long(), (x * self.buffer_downscale).long()\n\n # Add a random offset to avoid sampling the same pixel\n y += torch.randint(\n 0, self.buffer_downscale, (num_rays,), device=self.images.device\n )\n x += torch.randint(\n 0, self.buffer_downscale, (num_rays,), device=self.images.device\n )\n # Clamp to ensure coordinates don't exceed the image bounds\n y = torch.clamp(y, 0, self.HEIGHT - 1)\n x = torch.clamp(x, 0, self.WIDTH - 1)\n return img_idx, y, x\n\n def sample_uniform_rays(\n self,\n num_rays: int,\n img_candidate_indices: Tensor = None,\n ) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"\n Sample rays coordinates uniformly from the given images.\n Args:\n num_rays: the number of rays to sample.\n img_candidate_indices: the indices of the images to sample from.\n If None, sample from all the images.\n If not None, sample from the given images only.\n Returns:\n img_id: the image indices of the sampled rays.\n shape: (num_rays,)\n y: the vertical coordinates of the sampled rays.\n shape: (num_rays,)\n x: the horizontal coordinates of the sampled rays.\n shape: (num_rays,)\n \"\"\"\n # if img_candidate_indices is None, use all image indices\n if img_candidate_indices is None:\n img_candidate_indices = torch.arange(len(self.images)).to(self.device)\n if not isinstance(img_candidate_indices, Tensor):\n img_candidate_indices = torch.tensor(img_candidate_indices).to(self.device)\n # sample random index based on img_candidate_indices\n random_idx = torch.randint(\n 0,\n len(img_candidate_indices),\n size=(num_rays,),\n device=self.device,\n )\n img_id = img_candidate_indices[random_idx]\n\n # sample pixels\n x = torch.randint(\n 0,\n self.WIDTH,\n size=(num_rays,),\n device=self.device,\n )\n y = torch.randint(\n 0,\n self.HEIGHT,\n size=(num_rays,),\n device=self.device,\n )\n x, y = x.long(), y.long()\n return img_id, y, x\n\n def get_train_rays(\n self,\n num_rays: int,\n candidate_indices: Tensor = None,\n ) -> Dict[str, Tensor]:\n \"\"\"\n Get a batch of rays for training.\n Args:\n num_rays: the number of rays to sample.\n candidate_indices: the indices of the images to sample from.\n If None, sample from all the images.\n If not None, sample from the given images only.\n Returns:\n a dict of the sampled rays.\n \"\"\"\n rgb, sky_mask, dynamic_mask, features = None, None, None, None\n pixel_coords, normalized_timestamps = None, None\n if self.buffer_ratio > 0 and self.pixel_error_buffered:\n num_roi_rays = int(num_rays * self.buffer_ratio)\n num_random_rays = num_rays - num_roi_rays\n random_img_idx, random_y, random_x = self.sample_uniform_rays(\n num_random_rays, candidate_indices\n )\n roi_img_idx, roi_y, roi_x = self.sample_important_rays(\n num_roi_rays, candidate_indices\n )\n img_idx = torch.cat([random_img_idx, roi_img_idx], dim=0)\n y = torch.cat([random_y, roi_y], dim=0)\n x = torch.cat([random_x, roi_x], dim=0)\n else:\n img_idx, y, x = self.sample_uniform_rays(\n 
num_rays=num_rays, img_candidate_indices=candidate_indices\n )\n pixel_coords = torch.stack([y / self.HEIGHT, x / self.WIDTH], dim=-1)\n if self.images is not None:\n rgb = self.images[img_idx, y, x]\n if self.sky_masks is not None:\n sky_mask = self.sky_masks[img_idx, y, x]\n if self.dynamic_masks is not None:\n dynamic_mask = self.dynamic_masks[img_idx, y, x].float()\n if self.features is not None:\n features = self.get_features(\n img_idx, y, x, downscale=self.featmap_downscale_factor\n )\n if self.normalized_timestamps is not None:\n normalized_timestamps = self.normalized_timestamps[img_idx]\n if self.cam_ids is not None:\n camera_id = self.cam_ids[img_idx]\n image_id = torch.ones_like(x) * img_idx\n c2w = self.cam_to_worlds[img_idx]\n intrinsics = self.intrinsics[img_idx]\n origins, viewdirs, direction_norm = get_rays(x, y, c2w, intrinsics)\n data = {\n \"origins\": origins,\n \"viewdirs\": viewdirs,\n \"direction_norms\": direction_norm,\n \"pixel_coords\": pixel_coords,\n \"normed_timestamps\": normalized_timestamps,\n \"img_idx\": image_id,\n \"cam_idx\": camera_id,\n \"pixels\": rgb,\n \"sky_masks\": sky_mask,\n \"dynamic_masks\": dynamic_mask,\n \"features\": features,\n }\n return {k: v for k, v in data.items() if v is not None}\n\n def get_render_rays(self, img_idx: int) -> Dict[str, Tensor]:\n \"\"\"\n Get the rays for rendering the given image index.\n Args:\n img_idx: the image index.\n Returns:\n a dict containing the rays for rendering the given image index.\n \"\"\"\n rgb, sky_mask, dynamic_mask, features = None, None, None, None\n pixel_coords, normalized_timestamps = None, None\n if self.images is not None:\n rgb = self.images[img_idx]\n if self.downscale_factor != 1.0:\n rgb = (\n torch.nn.functional.interpolate(\n rgb.unsqueeze(0).permute(0, 3, 1, 2),\n scale_factor=self.downscale_factor,\n mode=\"bicubic\",\n antialias=True,\n )\n .squeeze(0)\n .permute(1, 2, 0)\n )\n img_height, img_width = rgb.shape[:2]\n else:\n img_height, img_width = self.HEIGHT, self.WIDTH\n\n x, y = torch.meshgrid(\n torch.arange(img_width),\n torch.arange(img_height),\n indexing=\"xy\",\n )\n x, y = x.flatten(), y.flatten()\n x, y = x.to(self.device), y.to(self.device)\n # pixel coordinates\n pixel_coords = (\n torch.stack([y / img_height, x / img_width], dim=-1)\n .float()\n .reshape(img_height, img_width, 2)\n )\n\n if self.sky_masks is not None:\n sky_mask = self.sky_masks[img_idx]\n if self.downscale_factor != 1.0:\n sky_mask = (\n torch.nn.functional.interpolate(\n sky_mask.unsqueeze(0).unsqueeze(0),\n scale_factor=self.downscale_factor,\n mode=\"nearest\",\n )\n .squeeze(0)\n .squeeze(0)\n )\n if self.dynamic_masks is not None:\n dynamic_mask = self.dynamic_masks[img_idx].float()\n if self.downscale_factor != 1.0:\n dynamic_mask = (\n torch.nn.functional.interpolate(\n dynamic_mask.unsqueeze(0).unsqueeze(0),\n scale_factor=self.downscale_factor,\n mode=\"nearest\",\n )\n .squeeze(0)\n .squeeze(0)\n )\n if self.features is not None:\n features = self.get_features(\n img_idx,\n y,\n x,\n downscale=(\n self.featmap_downscale_factor[0] / self.downscale_factor,\n self.featmap_downscale_factor[1] / self.downscale_factor,\n ),\n ).reshape(img_height, img_width, -1)\n\n if self.normalized_timestamps is not None:\n normalized_timestamps = torch.full(\n (img_height, img_width),\n self.normalized_timestamps[img_idx],\n dtype=torch.float32,\n )\n if self.cam_ids is not None:\n camera_id = torch.full(\n (img_height, img_width),\n self.cam_ids[img_idx],\n dtype=torch.long,\n )\n image_id = 
torch.full(\n (img_height, img_width),\n img_idx,\n dtype=torch.long,\n )\n c2w = self.cam_to_worlds[img_idx]\n intrinsics = self.intrinsics[img_idx] * self.downscale_factor\n intrinsics[2, 2] = 1.0\n origins, viewdirs, direction_norm = get_rays(x, y, c2w, intrinsics)\n origins = origins.reshape(img_height, img_width, 3)\n viewdirs = viewdirs.reshape(img_height, img_width, 3)\n direction_norm = direction_norm.reshape(img_height, img_width, 1)\n data = {\n \"origins\": origins,\n \"viewdirs\": viewdirs,\n \"direction_norm\": direction_norm,\n \"pixel_coords\": pixel_coords,\n \"normed_timestamps\": normalized_timestamps,\n \"img_idx\": image_id,\n \"cam_idx\": camera_id,\n \"pixels\": rgb,\n \"sky_masks\": sky_mask,\n \"dynamic_masks\": dynamic_mask,\n \"features\": features,\n }\n return {k: v for k, v in data.items() if v is not None}\n\n @property\n def num_cams(self) -> int:\n \"\"\"\n Returns:\n the number of cameras in the dataset\n \"\"\"\n return self.data_cfg.num_cams\n\n @property\n def num_imgs(self) -> int:\n \"\"\"\n Returns:\n the number of images in the dataset\n \"\"\"\n return len(self.cam_to_worlds)\n\n @property\n def num_timesteps(self) -> int:\n \"\"\"\n Returns:\n the number of image timesteps in the dataset\n \"\"\"\n return len(self.timesteps.unique())\n\n @property\n def timesteps(self) -> Tensor:\n \"\"\"\n Returns:\n the integer timestep indices of all images,\n shape: (num_imgs,)\n Note:\n the difference between timestamps and timesteps is that\n timestamps are the actual timestamps (minus 1e9) of images\n while timesteps are the integer timestep indices of images.\n \"\"\"\n return self._timesteps\n\n @property\n def timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the actual timestamps (minus 1e9) of all images,\n shape: (num_imgs,)\n \"\"\"\n return self._timestamps\n\n @property\n def normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the normalized timestamps of all images\n (normalized to the range [0, 1]),\n shape: (num_imgs,)\n \"\"\"\n return self._normalized_timestamps\n\n @property\n def unique_normalized_timestamps(self) -> Tensor:\n \"\"\"\n Returns:\n the unique normalized timestamps of all images\n (normalized to the range [0, 1]).\n shape: (num_timesteps,)\n \"\"\"\n return self._unique_normalized_timestamps\n\n def register_normalized_timestamps(self, normalized_timestamps: Tensor) -> None:\n \"\"\"\n Register the normalized timestamps of all images.\n Args:\n normalized_timestamps: the normalized timestamps of all images\n (normalized to the range [0, 1]).\n shape: (num_imgs,)\n Note:\n we normalize the image timestamps together with the lidar timestamps,\n so that the both the image and lidar timestamps are in the range [0, 1].\n \"\"\"\n assert normalized_timestamps.shape[0] == len(\n self.img_filepaths\n ), \"The number of normalized timestamps must match the number of images.\"\n assert (\n normalized_timestamps.min() >= 0 and normalized_timestamps.max() <= 1\n ), \"The normalized timestamps must be in the range [0, 1].\"\n self._normalized_timestamps = normalized_timestamps.to(self.device)\n self._unique_normalized_timestamps = self._normalized_timestamps.unique()\n\n def find_closest_timestep(self, normed_timestamp: float) -> int:\n \"\"\"\n Find the closest timestep to the given timestamp.\n Args:\n normed_timestamp: the normalized timestamp to find the closest timestep for.\n Returns:\n the closest timestep to the given timestamp.\n \"\"\"\n return torch.argmin(\n torch.abs(self.unique_normalized_timestamps - 
normed_timestamp)\n )\n\n @property\n def HEIGHT(self) -> int:\n return self.data_cfg.load_size[0]\n\n @property\n def WIDTH(self) -> int:\n return self.data_cfg.load_size[1]\n\n @property\n def downscale_factor(self) -> float:\n \"\"\"\n Returns:\n downscale_factor: the downscale factor of the images\n \"\"\"\n return self._downscale_factor\n\n def update_downscale_factor(self, downscale: float) -> None:\n \"\"\"\n Args:\n downscale: the new downscale factor\n Updates the downscale factor\n \"\"\"\n self._old_downscale_factor = self._downscale_factor\n self._downscale_factor = downscale\n\n def reset_downscale_factor(self) -> None:\n \"\"\"\n Resets the downscale factor to the original value\n \"\"\"\n self._downscale_factor = self._old_downscale_factor\n\n @property\n def buffer_downscale(self) -> float:\n \"\"\"\n Returns:\n buffer_downscale: the downscale factor of the pixel error buffer\n \"\"\"\n return self.data_cfg.sampler.buffer_downscale\n\n @property\n def buffer_ratio(self) -> float:\n \"\"\"\n Returns:\n buffer_ratio: the ratio of the rays sampled from the pixel error buffer\n \"\"\"\n return self.data_cfg.sampler.buffer_ratio" }, { "identifier": "SceneDataset", "path": "datasets/base/scene_dataset.py", "snippet": "class SceneDataset(abc.ABC):\n \"\"\"\n Base class for scene dataset.\n \"\"\"\n\n data_cfg: OmegaConf = None\n pixel_source: ScenePixelSource = None\n lidar_source: SceneLidarSource = None\n # training and testing indices are indices into the full dataset\n # train_indices are img indices, so the length is num_cams * num_timesteps\n train_indices: List[int] = None\n test_indices: List[int] = None\n # train_timesteps are timesteps, so the length is num_timesteps (len(unique_timesteps))\n train_timesteps: Tensor = None\n test_timesteps: Tensor = None\n\n # dataset wrappers\n # full: includes all data\n full_pixel_set: SplitWrapper = None\n full_lidar_set: SplitWrapper = None\n # train: includes only training data\n train_pixel_set: SplitWrapper = None\n train_lidar_set: SplitWrapper = None\n # test: includes only testing data\n test_pixel_set: SplitWrapper = None\n test_lidar_set: SplitWrapper = None\n\n def __init__(\n self,\n data_config: OmegaConf,\n ):\n super().__init__()\n self.data_cfg = data_config\n\n @abc.abstractmethod\n def build_data_source(self):\n \"\"\"\n Create the data source for the dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def build_split_wrapper(self):\n \"\"\"\n Makes each data source as a Pytorch Dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def split_train_test(self):\n raise NotImplementedError\n\n def get_aabb(self) -> Tensor:\n if self.lidar_source is not None:\n aabb = self.lidar_source.get_aabb()\n else:\n aabb = self.pixel_source.get_aabb()\n return aabb\n\n @property\n def num_cams(self) -> int:\n return self.pixel_source.num_cams\n\n @property\n def scene_idx(self) -> int:\n return self.data_cfg.scene_idx\n\n @property\n def num_img_timesteps(self) -> int:\n return self.pixel_source.num_timesteps\n\n @property\n def num_lidar_timesteps(self) -> int:\n if self.lidar_source is None:\n logger.warning(\"No lidar source, returning num_img_timesteps\")\n return self.num_img_timesteps\n return self.lidar_source.num_timesteps\n\n @property\n def num_train_timesteps(self) -> int:\n return len(self.train_timesteps)\n\n @property\n def num_test_timesteps(self) -> int:\n return len(self.test_timesteps)\n\n @property\n def unique_normalized_training_timestamps(self) -> Tensor:\n return 
self.pixel_source.unique_normalized_timestamps[self.train_timesteps]\n\n @property\n def device(self):\n return self.data_cfg.preload_device" }, { "identifier": "SplitWrapper", "path": "datasets/base/split_wrapper.py", "snippet": "class SplitWrapper(torch.utils.data.Dataset):\n\n # a sufficiently large number to make sure we don't run out of data\n _num_iters = 1000000\n\n def __init__(\n self,\n datasource: Union[ScenePixelSource, SceneLidarSource],\n split_indices: List[int] = None,\n split: str = \"train\",\n ray_batch_size: int = 4096,\n ):\n super().__init__()\n self.datasource = datasource\n self.split_indices = split_indices\n self.split = split\n self.ray_batch_size = ray_batch_size\n\n def __getitem__(self, idx) -> dict:\n if self.split == \"train\":\n # randomly sample rays from the training set\n return self.datasource.get_train_rays(\n num_rays=self.ray_batch_size,\n candidate_indices=self.split_indices,\n )\n else:\n # return all rays for the given index\n return self.datasource.get_render_rays(self.split_indices[idx])\n\n def __len__(self) -> int:\n if self.split == \"train\":\n return self.num_iters\n else:\n return len(self.split_indices)\n\n @property\n def num_iters(self) -> int:\n return self._num_iters\n\n def set_num_iters(self, num_iters) -> None:\n self._num_iters = num_iters" }, { "identifier": "voxel_coords_to_world_coords", "path": "datasets/utils.py", "snippet": "def voxel_coords_to_world_coords(\n aabb_min: Union[Tensor, List[float]],\n aabb_max: Union[Tensor, List[float]],\n voxel_resolution: Union[Tensor, List[int]],\n points: Union[Tensor, List[float]] = None,\n) -> Tensor:\n \"\"\"\n Converts voxel coordinates to world coordinates.\n\n Args:\n aabb_min (Union[Tensor, List[float]]): Minimum coordinates of the axis-aligned bounding box (AABB) of the voxel grid.\n aabb_max (Union[Tensor, List[float]]): Maximum coordinates of the AABB of the voxel grid.\n voxel_resolution (Union[Tensor, List[int]]): Number of voxels in each dimension of the voxel grid.\n points (Union[Tensor, List[float]], optional):\n Tensor of voxel coordinates to convert to world coordinates.\n If None, returns a grid of world coordinates. 
Defaults to None.\n Returns:\n Tensor: Tensor of world coordinates.\n \"\"\"\n aabb_min = torch.tensor(aabb_min) if isinstance(aabb_min, List) else aabb_min\n aabb_max = torch.tensor(aabb_max) if isinstance(aabb_max, List) else aabb_max\n voxel_resolution = (\n torch.tensor(voxel_resolution)\n if isinstance(voxel_resolution, List)\n else voxel_resolution\n )\n\n if points is None:\n x, y, z = torch.meshgrid(\n torch.linspace(aabb_min[0], aabb_max[0], voxel_resolution[0]),\n torch.linspace(aabb_min[1], aabb_max[1], voxel_resolution[1]),\n torch.linspace(aabb_min[2], aabb_max[2], voxel_resolution[2]),\n )\n return torch.stack([x, y, z], dim=-1)\n else:\n points = torch.tensor(points) if isinstance(points, List) else points\n\n # Compute voxel size\n voxel_size = (aabb_max - aabb_min) / voxel_resolution\n\n # Convert voxel coordinates to world coordinates\n world_coords = aabb_min.to(points.device) + points * voxel_size.to(\n points.device\n )\n return world_coords" }, { "identifier": "depth_visualizer", "path": "radiance_fields/video_utils.py", "snippet": "def render_pixels(\n cfg: OmegaConf,\n model: RadianceField,\n proposal_estimator: PropNetEstimator,\n dataset: SplitWrapper,\n proposal_networks: Optional[List[DensityField]] = None,\n compute_metrics: bool = False,\n vis_indices: Optional[List[int]] = None,\n return_decomposition: bool = True,\n):\ndef render(\n dataset: SplitWrapper,\n render_func: Callable,\n model: Optional[RadianceField] = None,\n compute_metrics: bool = False,\n vis_indices: Optional[List[int]] = None,\n):\ndef save_videos(\n render_results: Dict[str, List[Tensor]],\n save_pth: str,\n num_timestamps: int,\n keys: List[str] = [\"gt_rgbs\", \"rgbs\", \"depths\"],\n num_cams: int = 3,\n save_seperate_video: bool = False,\n save_images: bool = False,\n fps: int = 10,\n verbose: bool = True,\n):\ndef save_concatenated_videos(\n render_results: Dict[str, List[Tensor]],\n save_pth: str,\n num_timestamps: int,\n keys: List[str] = [\"gt_rgbs\", \"rgbs\", \"depths\"],\n num_cams: int = 3,\n save_images: bool = False,\n fps: int = 10,\n verbose: bool = True,\n):\ndef save_seperate_videos(\n render_results: Dict[str, List[Tensor]],\n save_pth: str,\n num_timestamps: int,\n keys: List[str] = [\"gt_rgbs\", \"rgbs\", \"depths\"],\n num_cams: int = 3,\n fps: int = 10,\n verbose: bool = False,\n save_images: bool = False,\n):" } ]
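A small usage sketch of the voxel_coords_to_world_coords helper listed in the context above. The bounding box and grid resolution are made-up values for illustration; only the call signature and import path come from the snippet.

import torch
from datasets.utils import voxel_coords_to_world_coords

# Illustrative (made-up) scene bounds and grid resolution.
aabb_min = [-40.0, -40.0, -5.0]
aabb_max = [40.0, 40.0, 20.0]
resolution = [64, 64, 16]

# With points=None the helper returns a dense grid of world coordinates that
# spans the box, with shape (*resolution, 3).
grid = voxel_coords_to_world_coords(aabb_min, aabb_max, resolution)
assert grid.shape == (64, 64, 16, 3)

# With explicit voxel coordinates it maps them through aabb_min + points * voxel_size.
origin = voxel_coords_to_world_coords(aabb_min, aabb_max, resolution, points=[[0.0, 0.0, 0.0]])
# origin is the aabb_min corner, shape (1, 3).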
import logging
import os
import numpy as np
import torch
from typing import Dict
from omegaconf import OmegaConf
from torch import Tensor
from tqdm import trange
from datasets.base.lidar_source import SceneLidarSource
from datasets.base.pixel_source import ScenePixelSource
from datasets.base.scene_dataset import SceneDataset
from datasets.base.split_wrapper import SplitWrapper
from datasets.utils import voxel_coords_to_world_coords
from radiance_fields.video_utils import depth_visualizer, save_videos, scene_flow_to_rgb
17,561
render_results=video_dict, save_pth=kwargs["save_pth"], num_timestamps=kwargs["num_timestamps"], keys=kwargs["keys"], num_cams=kwargs["num_cams"], fps=kwargs["fps"], verbose=kwargs["verbose"], save_seperate_video=kwargs["save_seperate_video"], ) def render_data_videos( self, save_pth: str, split: str = "full", fps: int = 24, verbose=True, ): """ Render a video of data. """ pixel_dataset, lidar_dataset = None, None if split == "full": if self.pixel_source is not None: pixel_dataset = self.full_pixel_set if self.lidar_source is not None: lidar_dataset = self.full_lidar_set elif split == "train": if self.pixel_source is not None: pixel_dataset = self.train_pixel_set if self.lidar_source is not None: lidar_dataset = self.train_lidar_set elif split == "test": if self.pixel_source is not None: pixel_dataset = self.test_pixel_set if self.lidar_source is not None: lidar_dataset = self.test_lidar_set else: raise NotImplementedError(f"Split {split} not supported") # pixel source rgb_imgs, dynamic_objects = [], [] sky_masks, feature_pca_colors = [], [] lidar_depths, flow_colors = [], [] for i in trange( len(pixel_dataset), desc="Rendering data videos", dynamic_ncols=True ): data_dict = pixel_dataset[i] if "pixels" in data_dict: rgb_imgs.append(data_dict["pixels"].cpu().numpy()) if "dynamic_masks" in data_dict: dynamic_objects.append( (data_dict["dynamic_masks"].unsqueeze(-1) * data_dict["pixels"]) .cpu() .numpy() ) if "sky_masks" in data_dict: sky_masks.append(data_dict["sky_masks"].cpu().numpy()) if "features" in data_dict: features = data_dict["features"] # use registered parameters to normalize the features for visualization features = features @ self.pixel_source.feat_dimension_reduction_mat features = (features - self.pixel_source.feat_color_min) / ( self.pixel_source.feat_color_max - self.pixel_source.feat_color_min ).clamp(0, 1) feature_pca_colors.append(features.cpu().numpy()) if lidar_dataset is not None: # to deal with asynchronized data # find the closest lidar scan to the current image in time closest_lidar_idx = self.lidar_source.find_closest_timestep( data_dict["normed_timestamps"].flatten()[0] ) data_dict = lidar_dataset[closest_lidar_idx] lidar_points = ( data_dict["lidar_origins"] + data_dict["lidar_ranges"] * data_dict["lidar_viewdirs"] ) # project lidar points to the image plane # TODO: consider making this a function intrinsic_4x4 = torch.nn.functional.pad( self.pixel_source.intrinsics[i], (0, 1, 0, 1) ) intrinsic_4x4[3, 3] = 1.0 lidar2img = intrinsic_4x4 @ self.pixel_source.cam_to_worlds[i].inverse() lidar_points = ( lidar2img[:3, :3] @ lidar_points.T + lidar2img[:3, 3:4] ).T depth = lidar_points[:, 2] cam_points = lidar_points[:, :2] / (depth.unsqueeze(-1) + 1e-6) valid_mask = ( (cam_points[:, 0] >= 0) & (cam_points[:, 0] < self.pixel_source.WIDTH) & (cam_points[:, 1] >= 0) & (cam_points[:, 1] < self.pixel_source.HEIGHT) & (depth > 0) ) depth = depth[valid_mask] _cam_points = cam_points[valid_mask] depth_map = torch.zeros( self.pixel_source.HEIGHT, self.pixel_source.WIDTH ).to(self.device) depth_map[ _cam_points[:, 1].long(), _cam_points[:, 0].long() ] = depth.squeeze(-1) depth_img = depth_map.cpu().numpy() depth_img = depth_visualizer(depth_img, depth_img > 0) mask = (depth_map.unsqueeze(-1) > 0).cpu().numpy() # show the depth map on top of the rgb image image = rgb_imgs[-1] * (1 - mask) + depth_img * mask lidar_depths.append(image) # project lidar flows to the image plane flow_img = torch.zeros( self.pixel_source.HEIGHT, self.pixel_source.WIDTH, 3 ).to(self.device) # to 
examine whether the ground labels are correct valid_mask = valid_mask & (~data_dict["lidar_ground"]) _cam_points = cam_points[valid_mask] # final color: # white if no flow, black if ground, and flow color otherwise
logger = logging.getLogger() class WaymoPixelSource(ScenePixelSource): ORIGINAL_SIZE = [[1280, 1920], [1280, 1920], [1280, 1920], [884, 1920], [884, 1920]] OPENCV2DATASET = np.array( [[0, 0, 1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]] ) def __init__( self, pixel_data_config: OmegaConf, data_path: str, start_timestep: int, end_timestep: int, device: torch.device = torch.device("cpu"), ): super().__init__(pixel_data_config, device=device) self.data_path = data_path self.start_timestep = start_timestep self.end_timestep = end_timestep self.create_all_filelist() self.load_data() def create_all_filelist(self): """ Create file lists for all data files. e.g., img files, feature files, etc. """ # ---- define camera list ---- # # 0: front, 1: front_left, 2: front_right, 3: side_left, 4: side_right if self.num_cams == 1: self.camera_list = [0] elif self.num_cams == 3: self.camera_list = [1, 0, 2] elif self.num_cams == 5: self.camera_list = [3, 1, 0, 2, 4] else: raise NotImplementedError( f"num_cams: {self.num_cams} not supported for waymo dataset" ) # ---- define filepaths ---- # img_filepaths, feat_filepaths = [], [] dynamic_mask_filepaths, sky_mask_filepaths = [], [] # Note: we assume all the files in waymo dataset are synchronized for t in range(self.start_timestep, self.end_timestep): for cam_idx in self.camera_list: img_filepaths.append( os.path.join(self.data_path, "images", f"{t:03d}_{cam_idx}.jpg") ) dynamic_mask_filepaths.append( os.path.join( self.data_path, "dynamic_masks", f"{t:03d}_{cam_idx}.png" ) ) sky_mask_filepaths.append( os.path.join(self.data_path, "sky_masks", f"{t:03d}_{cam_idx}.png") ) feat_filepaths.append( os.path.join( self.data_path, self.data_cfg.feature_model_type, f"{t:03d}_{cam_idx}.npy", ) ) self.img_filepaths = np.array(img_filepaths) self.dynamic_mask_filepaths = np.array(dynamic_mask_filepaths) self.sky_mask_filepaths = np.array(sky_mask_filepaths) self.feat_filepaths = np.array(feat_filepaths) def load_calibrations(self): """ Load the camera intrinsics, extrinsics, timestamps, etc. Compute the camera-to-world matrices, ego-to-world matrices, etc. """ # to store per-camera intrinsics and extrinsics _intrinsics = [] cam_to_egos = [] for i in range(self.num_cams): # load camera intrinsics # 1d Array of [f_u, f_v, c_u, c_v, k{1, 2}, p{1, 2}, k{3}]. # ====!! we did not use distortion parameters for simplicity !!==== # to be improved!! intrinsic = np.loadtxt( os.path.join(self.data_path, "intrinsics", f"{i}.txt") ) fx, fy, cx, cy = intrinsic[0], intrinsic[1], intrinsic[2], intrinsic[3] # scale intrinsics w.r.t. load size fx, fy = ( fx * self.data_cfg.load_size[1] / self.ORIGINAL_SIZE[i][1], fy * self.data_cfg.load_size[0] / self.ORIGINAL_SIZE[i][0], ) cx, cy = ( cx * self.data_cfg.load_size[1] / self.ORIGINAL_SIZE[i][1], cy * self.data_cfg.load_size[0] / self.ORIGINAL_SIZE[i][0], ) intrinsic = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]]) _intrinsics.append(intrinsic) # load camera extrinsics cam_to_ego = np.loadtxt( os.path.join(self.data_path, "extrinsics", f"{i}.txt") ) # because we use opencv coordinate system to generate camera rays, # we need a transformation matrix to covnert rays from opencv coordinate # system to waymo coordinate system. # opencv coordinate system: x right, y down, z front # waymo coordinate system: x front, y left, z up cam_to_egos.append(cam_to_ego @ self.OPENCV2DATASET) # compute per-image poses and intrinsics cam_to_worlds, ego_to_worlds = [], [] intrinsics, cam_ids = [], [] # ===! 
for waymo, we simplify timestamps as the time indices timestamps, timesteps = [], [] # we tranform the camera poses w.r.t. the first timestep to make the translation vector of # the first ego pose as the origin of the world coordinate system. ego_to_world_start = np.loadtxt( os.path.join(self.data_path, "ego_pose", f"{self.start_timestep:03d}.txt") ) for t in range(self.start_timestep, self.end_timestep): ego_to_world_current = np.loadtxt( os.path.join(self.data_path, "ego_pose", f"{t:03d}.txt") ) # compute ego_to_world transformation ego_to_world = np.linalg.inv(ego_to_world_start) @ ego_to_world_current ego_to_worlds.append(ego_to_world) for cam_id in self.camera_list: cam_ids.append(cam_id) # transformation: # (opencv_cam -> waymo_cam -> waymo_ego_vehicle) -> current_world cam2world = ego_to_world @ cam_to_egos[cam_id] cam_to_worlds.append(cam2world) intrinsics.append(_intrinsics[cam_id]) # ===! we use time indices as the timestamp for waymo dataset for simplicity # ===! we can use the actual timestamps if needed # to be improved timestamps.append(t - self.start_timestep) timesteps.append(t - self.start_timestep) self.intrinsics = torch.from_numpy(np.stack(intrinsics, axis=0)).float() self.cam_to_worlds = torch.from_numpy(np.stack(cam_to_worlds, axis=0)).float() self.ego_to_worlds = torch.from_numpy(np.stack(ego_to_worlds, axis=0)).float() self.cam_ids = torch.from_numpy(np.stack(cam_ids, axis=0)).long() # the underscore here is important. self._timestamps = torch.from_numpy(np.stack(timestamps, axis=0)).float() self._timesteps = torch.from_numpy(np.stack(timesteps, axis=0)).long() class WaymoLiDARSource(SceneLidarSource): def __init__( self, lidar_data_config: OmegaConf, data_path: str, start_timestep: int, end_timestep: int, device: torch.device = torch.device("cpu"), ): super().__init__(lidar_data_config, device=device) self.data_path = data_path self.start_timestep = start_timestep self.end_timestep = end_timestep self.create_all_filelist() self.load_data() def create_all_filelist(self): """ Create a list of all the files in the dataset. e.g., a list of all the lidar scans in the dataset. """ lidar_filepaths = [] for t in range(self.start_timestep, self.end_timestep): lidar_filepaths.append( os.path.join(self.data_path, "lidar", f"{t:03d}.bin") ) self.lidar_filepaths = np.array(lidar_filepaths) def load_calibrations(self): """ Load the calibration files of the dataset. e.g., lidar to world transformation matrices. """ # Note that in the Waymo Open Dataset, the lidar coordinate system is the same # as the vehicle coordinate system lidar_to_worlds = [] # we tranform the poses w.r.t. the first timestep to make the origin of the # first ego pose as the origin of the world coordinate system. ego_to_world_start = np.loadtxt( os.path.join(self.data_path, "ego_pose", f"{self.start_timestep:03d}.txt") ) for t in range(self.start_timestep, self.end_timestep): ego_to_world_current = np.loadtxt( os.path.join(self.data_path, "ego_pose", f"{t:03d}.txt") ) # compute ego_to_world transformation lidar_to_world = np.linalg.inv(ego_to_world_start) @ ego_to_world_current lidar_to_worlds.append(lidar_to_world) self.lidar_to_worlds = torch.from_numpy( np.stack(lidar_to_worlds, axis=0) ).float() def load_lidar(self): """ Load the lidar data of the dataset from the filelist. 
""" origins, directions, ranges, laser_ids = [], [], [], [] # flow/ground info are used for evaluation only flows, flow_classes, grounds = [], [], [] # in waymo, we simplify timestamps as the time indices timestamps, timesteps = [], [] accumulated_num_original_rays = 0 accumulated_num_rays = 0 for t in trange( 0, len(self.lidar_filepaths), desc="Loading lidar", dynamic_ncols=True ): # each lidar_info contains an Nx14 array # from left to right: # origins: 3d, points: 3d, flows: 3d, flow_class: 1d, # ground_labels: 1d, intensities: 1d, elongations: 1d, laser_ids: 1d lidar_info = np.memmap( self.lidar_filepaths[t], dtype=np.float32, mode="r", ).reshape(-1, 14) original_length = len(lidar_info) accumulated_num_original_rays += original_length # select lidar points based on the laser id if self.data_cfg.only_use_top_lidar: # laser_ids: 0: TOP, 1: FRONT, 2: SIDE_LEFT, 3: SIDE_RIGHT, 4: REAR lidar_info = lidar_info[lidar_info[:, 13] == 0] lidar_origins = torch.from_numpy(lidar_info[:, :3]).float() lidar_points = torch.from_numpy(lidar_info[:, 3:6]).float() lidar_ids = torch.from_numpy(lidar_info[:, 13]).float() lidar_flows = torch.from_numpy(lidar_info[:, 6:9]).float() lidar_flow_classes = torch.from_numpy(lidar_info[:, 9]).long() ground_labels = torch.from_numpy(lidar_info[:, 10]).long() # we don't collect intensities and elongations for now # select lidar points based on a truncated ego-forward-directional range # this is to make sure most of the lidar points are within the range of the camera valid_mask = torch.ones_like(lidar_origins[:, 0]).bool() if self.data_cfg.truncated_max_range is not None: valid_mask = lidar_points[:, 0] < self.data_cfg.truncated_max_range if self.data_cfg.truncated_min_range is not None: valid_mask = valid_mask & ( lidar_points[:, 0] > self.data_cfg.truncated_min_range ) lidar_origins = lidar_origins[valid_mask] lidar_points = lidar_points[valid_mask] lidar_ids = lidar_ids[valid_mask] lidar_flows = lidar_flows[valid_mask] lidar_flow_classes = lidar_flow_classes[valid_mask] ground_labels = ground_labels[valid_mask] # transform lidar points from lidar coordinate system to world coordinate system lidar_origins = ( self.lidar_to_worlds[t][:3, :3] @ lidar_origins.T + self.lidar_to_worlds[t][:3, 3:4] ).T lidar_points = ( self.lidar_to_worlds[t][:3, :3] @ lidar_points.T + self.lidar_to_worlds[t][:3, 3:4] ).T # scene flows are in the lidar coordinate system, so we need to rotate them lidar_flows = (self.lidar_to_worlds[t][:3, :3] @ lidar_flows.T).T # compute lidar directions lidar_directions = lidar_points - lidar_origins lidar_ranges = torch.norm(lidar_directions, dim=-1, keepdim=True) lidar_directions = lidar_directions / lidar_ranges # we use time indices as the timestamp for waymo dataset lidar_timestamp = torch.ones_like(lidar_ranges).squeeze(-1) * t accumulated_num_rays += len(lidar_ranges) origins.append(lidar_origins) directions.append(lidar_directions) ranges.append(lidar_ranges) laser_ids.append(lidar_ids) flows.append(lidar_flows) flow_classes.append(lidar_flow_classes) grounds.append(ground_labels) # we use time indices as the timestamp for waymo dataset timestamps.append(lidar_timestamp) timesteps.append(lidar_timestamp) logger.info( f"Number of lidar rays: {accumulated_num_rays} " f"({accumulated_num_rays / accumulated_num_original_rays * 100:.2f}% of " f"{accumulated_num_original_rays} original rays)" ) logger.info("Filter condition:") logger.info(f" only_use_top_lidar: {self.data_cfg.only_use_top_lidar}") logger.info(f" truncated_max_range: 
{self.data_cfg.truncated_max_range}") logger.info(f" truncated_min_range: {self.data_cfg.truncated_min_range}") self.origins = torch.cat(origins, dim=0) self.directions = torch.cat(directions, dim=0) self.ranges = torch.cat(ranges, dim=0) self.laser_ids = torch.cat(laser_ids, dim=0) # becasue the flows here are velocities (m/s), and the fps of the lidar is 10, # we need to divide the velocities by 10 to get the displacements/flows # between two consecutive lidar scans self.flows = torch.cat(flows, dim=0) / 10.0 self.flow_classes = torch.cat(flow_classes, dim=0) self.grounds = torch.cat(grounds, dim=0).bool() # the underscore here is important. self._timestamps = torch.cat(timestamps, dim=0) self._timesteps = torch.cat(timesteps, dim=0) def to(self, device: torch.device): super().to(device) self.flows = self.flows.to(device) self.flow_classes = self.flow_classes.to(device) self.grounds = self.grounds.to(self.device) def get_render_rays(self, time_idx: int) -> Dict[str, Tensor]: """ Override the base class function to add more information to the render rays. """ return { "lidar_origins": self.origins[self.timesteps == time_idx], "lidar_viewdirs": self.directions[self.timesteps == time_idx], "lidar_ranges": self.ranges[self.timesteps == time_idx], # normalized timestamps between 0 and 1 "lidar_normed_timestamps": self.normalized_timestamps[ self.timesteps == time_idx ], "lidar_flow": self.flows[self.timesteps == time_idx], "lidar_flow_class": self.flow_classes[self.timesteps == time_idx], "lidar_ground": self.grounds[self.timesteps == time_idx], } class WaymoDataset(SceneDataset): dataset: str = "waymo" def __init__( self, data_cfg: OmegaConf, ) -> None: super().__init__(data_cfg) self.data_path = os.path.join(self.data_cfg.data_root, f"{self.scene_idx:03d}") assert self.data_cfg.dataset == "waymo" assert os.path.exists(self.data_path), f"{self.data_path} does not exist" # ---- find the number of synchronized frames ---- # if self.data_cfg.end_timestep == -1: num_files = len(os.listdir(os.path.join(self.data_path, "ego_pose"))) end_timestep = num_files - 1 else: end_timestep = self.data_cfg.end_timestep # to make sure the last timestep is included self.end_timestep = end_timestep + 1 self.start_timestep = self.data_cfg.start_timestep # ---- create data source ---- # self.pixel_source, self.lidar_source = self.build_data_source() self.aabb = self.get_aabb() # ---- define train and test indices ---- # # note that the timestamps of the pixel source and the lidar source are the same in waymo dataset ( self.train_timesteps, self.test_timesteps, self.train_indices, self.test_indices, ) = self.split_train_test() # ---- create split wrappers ---- # pixel_sets, lidar_sets = self.build_split_wrapper() self.train_pixel_set, self.test_pixel_set, self.full_pixel_set = pixel_sets self.train_lidar_set, self.test_lidar_set, self.full_lidar_set = lidar_sets def build_split_wrapper(self): """ Makes each data source as a Pytorch Dataset """ train_pixel_set, test_pixel_set, full_pixel_set = None, None, None train_lidar_set, test_lidar_set, full_lidar_set = None, None, None if self.pixel_source is not None: train_pixel_set = SplitWrapper( datasource=self.pixel_source, # train_indices are img indices, so the length is num_cams * num_train_timesteps split_indices=self.train_indices, split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_pixel_set = SplitWrapper( datasource=self.pixel_source, # cover all the images split_indices=np.arange(self.pixel_source.num_imgs).tolist(), split="full", 
ray_batch_size=self.data_cfg.ray_batch_size, ) if len(self.test_indices) > 0: test_pixel_set = SplitWrapper( datasource=self.pixel_source, # test_indices are img indices, so the length is num_cams * num_test_timesteps split_indices=self.test_indices, split="test", ray_batch_size=self.data_cfg.ray_batch_size, ) if self.lidar_source is not None: train_lidar_set = SplitWrapper( datasource=self.lidar_source, # train_timesteps are lidar indices, so the length is num_train_timesteps split_indices=self.train_timesteps, split="train", ray_batch_size=self.data_cfg.ray_batch_size, ) full_lidar_set = SplitWrapper( datasource=self.lidar_source, # cover all the lidar scans split_indices=np.arange(self.lidar_source.num_timesteps), split="full", ray_batch_size=self.data_cfg.ray_batch_size, ) if len(self.test_indices) > 0: test_lidar_set = SplitWrapper( datasource=self.lidar_source, # test_timesteps are lidar indices, so the length is num_test_timesteps split_indices=self.test_timesteps, split="test", ray_batch_size=self.data_cfg.ray_batch_size, ) pixel_set = (train_pixel_set, test_pixel_set, full_pixel_set) lidar_set = (train_lidar_set, test_lidar_set, full_lidar_set) return pixel_set, lidar_set def build_data_source(self): """ Create the data source for the dataset. """ pixel_source, lidar_source = None, None # to collect all timestamps from pixel source and lidar source all_timestamps = [] # ---- create pixel source ---- # load_pixel = ( self.data_cfg.pixel_source.load_rgb or self.data_cfg.pixel_source.load_sky_mask or self.data_cfg.pixel_source.load_dynamic_mask or self.data_cfg.pixel_source.load_feature ) if load_pixel: pixel_source = WaymoPixelSource( self.data_cfg.pixel_source, self.data_path, self.start_timestep, self.end_timestep, device=self.device, ) pixel_source.to(self.device) # collect img timestamps all_timestamps.append(pixel_source.timestamps) # ---- create lidar source ---- # if self.data_cfg.lidar_source.load_lidar: lidar_source = WaymoLiDARSource( self.data_cfg.lidar_source, self.data_path, self.start_timestep, self.end_timestep, device=self.device, ) lidar_source.to(self.device) # collect lidar timestamps all_timestamps.append(lidar_source.timestamps) assert len(all_timestamps) > 0, "No data source is loaded" all_timestamps = torch.cat(all_timestamps, dim=0) # normalize the timestamps jointly for pixel source and lidar source # so that the normalized timestamps are between 0 and 1 all_timestamps = (all_timestamps - all_timestamps.min()) / ( all_timestamps.max() - all_timestamps.min() ) if pixel_source is not None: pixel_source.register_normalized_timestamps( all_timestamps[: len(pixel_source.timestamps)] ) if lidar_source is not None: lidar_source.register_normalized_timestamps( all_timestamps[-len(lidar_source.timestamps) :] ) return pixel_source, lidar_source def split_train_test(self): if self.data_cfg.pixel_source.test_image_stride != 0: test_timesteps = np.arange( # it makes no sense to have test timesteps before the start timestep self.data_cfg.pixel_source.test_image_stride, self.num_img_timesteps, self.data_cfg.pixel_source.test_image_stride, ) else: test_timesteps = [] train_timesteps = np.array( [i for i in range(self.num_img_timesteps) if i not in test_timesteps] ) logger.info( f"Train timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[train_timesteps]}" ) logger.info( f"Test timesteps: \n{np.arange(self.start_timestep, self.end_timestep)[test_timesteps]}" ) # propagate the train and test timesteps to the train and test indices train_indices, test_indices = [], 
[] for t in range(self.num_img_timesteps): if t in train_timesteps: for cam in range(self.pixel_source.num_cams): train_indices.append(t * self.pixel_source.num_cams + cam) elif t in test_timesteps: for cam in range(self.pixel_source.num_cams): test_indices.append(t * self.pixel_source.num_cams + cam) logger.info(f"Number of train indices: {len(train_indices)}") logger.info(f"Train indices: {train_indices}") logger.info(f"Number of test indices: {len(test_indices)}") logger.info(f"Test indices: {test_indices}") # Again, training and testing indices are indices into the full dataset # train_indices are img indices, so the length is num_cams * num_train_timesteps # but train_timesteps are timesteps, so the length is num_train_timesteps (len(unique_train_timestamps)) return train_timesteps, test_timesteps, train_indices, test_indices def get_occ(self, index: int): """ Get the Occ3D data of the scene at the given index. """ # from: https://github.com/Tsinghua-MARS-Lab/Occ3D#occ3d-waymo # The dataset contains 15 classes. The definition of classes from 0 to 14 is # 0: TYPE_GENERALOBJECT, 1: TYPE_VEHICLE, 2: TYPE_PEDESTRIAN, 3: TYPE_SIGN, # 4: TYPE_CYCLIST, 5: TYPE_TRAFFIC_LIGHT, 6: TYPE_POLE, 7: TYPE_CONSTRUCTION_CONE, # 8: TYPE_BICYCLE, 9: TYPE_MOTORCYCLE, 10: TYPE_BUILDING, 11: TYPE_VEGETATION, # 12: TYPE_TREE_TRUNK, 13: TYPE_ROAD, 14: TYPE_WALKABLE. self.label_mapping = { 0: "general_obj", 1: "vehicle", 2: "pedestrian", 3: "sign", 4: "cyclist", 5: "traffic_light", 6: "pole", 7: "construction_cone", 8: "bicyle", 9: "motorcycle", 10: "building", 11: "vegetation", 12: "tree_trunck", 13: "road", 14: "walkable", } if self.data_cfg.occ_source.voxel_size == 0.4: occ_path = f"{self.data_path}/occ3d/{index:03d}_04.npz" occupancy_resolution = [100, 200, 16] occupancy_aabb_min = [0, -40, -1] occupancy_aabb_max = [40, 40, 5.4] elif self.data_cfg.occ_source.voxel_size == 0.1: occ_path = f"{self.data_path}/occ3d/{index:03d}.npz" occupancy_resolution = [800, 1600, 64] occupancy_aabb_min = [0, -80, -5] occupancy_aabb_max = [80, 80, 7.8] else: raise NotImplementedError( f"voxel size {self.data_cfg.occ_source.voxel_size} not supported" ) if not os.path.exists(occ_path): raise FileNotFoundError(f"{occ_path} does not exist") # loading the occupancy grid gt_occ = np.load(occ_path) # np.unique(gt_occ['voxel_label']): array([ 0, 1, 2, 3, 6, 8, 9, 10, 11, 12, 13, 14, 23], dtype=uint8) semantic_labels = gt_occ["voxel_label"] # final voxel_state will indicate what voxels are visible from the camera mask_camera = gt_occ["final_voxel_state"] # we don't have back-cameras, so we remove the back part of the grid semantic_labels = semantic_labels[len(semantic_labels) // 2 :, :, :] mask_camera = mask_camera[len(mask_camera) // 2 :, :, :] # semantic_labels == 23 means the free space, i.e. 
empty semantic_labels[semantic_labels == 23] = 15 # mask_camera == 0 means invisible from the camera semantic_labels[mask_camera == 0] = 15 semantic_labels = ( torch.from_numpy(semantic_labels.copy()).long().to(self.device) ) # compute the coordinates and labels of the occupied voxels occ_coords = torch.nonzero(semantic_labels != 15).float() occ_labels = semantic_labels[semantic_labels != 15] # transform the coordinates from voxel space to world space ego_occ_coords = voxel_coords_to_world_coords( occupancy_aabb_min, occupancy_aabb_max, occupancy_resolution, points=occ_coords, ).to(self.device) world_occ_coords = ( self.lidar_source.lidar_to_worlds[index][:3, :3] @ ego_occ_coords.T + self.lidar_source.lidar_to_worlds[index][:3, 3:4] ).T normed_timestamps = ( torch.ones_like(world_occ_coords[..., 0]) * index / (self.lidar_source.num_timesteps + 1e-6 - 1) ) return world_occ_coords, occ_labels, normed_timestamps def get_valid_lidar_mask(self, lidar_timestep: int, data_dict: dict): # filter out the lidar points that are not visible from the camera lidar_points = ( data_dict["lidar_origins"] + data_dict["lidar_ranges"] * data_dict["lidar_viewdirs"] ) valid_mask = torch.zeros_like(lidar_points[:, 0]).bool() # project lidar points to the image plane for i in range(self.pixel_source.num_cams): img_idx = lidar_timestep * self.pixel_source.num_cams + i intrinsic_4x4 = torch.nn.functional.pad( self.pixel_source.intrinsics[img_idx], (0, 1, 0, 1) ) intrinsic_4x4[3, 3] = 1.0 lidar2img = ( intrinsic_4x4 @ self.pixel_source.cam_to_worlds[img_idx].inverse() ) projected_points = ( lidar2img[:3, :3] @ lidar_points.T + lidar2img[:3, 3:4] ).T depth = projected_points[:, 2] cam_points = projected_points[:, :2] / (depth.unsqueeze(-1) + 1e-6) current_valid_mask = ( (cam_points[:, 0] >= 0) & (cam_points[:, 0] < self.pixel_source.WIDTH) & (cam_points[:, 1] >= 0) & (cam_points[:, 1] < self.pixel_source.HEIGHT) & (depth > 0) ) valid_mask = valid_mask | current_valid_mask return valid_mask def save_videos(self, video_dict: dict, **kwargs): """ Save the a video of the data. """ return save_videos( render_results=video_dict, save_pth=kwargs["save_pth"], num_timestamps=kwargs["num_timestamps"], keys=kwargs["keys"], num_cams=kwargs["num_cams"], fps=kwargs["fps"], verbose=kwargs["verbose"], save_seperate_video=kwargs["save_seperate_video"], ) def render_data_videos( self, save_pth: str, split: str = "full", fps: int = 24, verbose=True, ): """ Render a video of data. 
""" pixel_dataset, lidar_dataset = None, None if split == "full": if self.pixel_source is not None: pixel_dataset = self.full_pixel_set if self.lidar_source is not None: lidar_dataset = self.full_lidar_set elif split == "train": if self.pixel_source is not None: pixel_dataset = self.train_pixel_set if self.lidar_source is not None: lidar_dataset = self.train_lidar_set elif split == "test": if self.pixel_source is not None: pixel_dataset = self.test_pixel_set if self.lidar_source is not None: lidar_dataset = self.test_lidar_set else: raise NotImplementedError(f"Split {split} not supported") # pixel source rgb_imgs, dynamic_objects = [], [] sky_masks, feature_pca_colors = [], [] lidar_depths, flow_colors = [], [] for i in trange( len(pixel_dataset), desc="Rendering data videos", dynamic_ncols=True ): data_dict = pixel_dataset[i] if "pixels" in data_dict: rgb_imgs.append(data_dict["pixels"].cpu().numpy()) if "dynamic_masks" in data_dict: dynamic_objects.append( (data_dict["dynamic_masks"].unsqueeze(-1) * data_dict["pixels"]) .cpu() .numpy() ) if "sky_masks" in data_dict: sky_masks.append(data_dict["sky_masks"].cpu().numpy()) if "features" in data_dict: features = data_dict["features"] # use registered parameters to normalize the features for visualization features = features @ self.pixel_source.feat_dimension_reduction_mat features = (features - self.pixel_source.feat_color_min) / ( self.pixel_source.feat_color_max - self.pixel_source.feat_color_min ).clamp(0, 1) feature_pca_colors.append(features.cpu().numpy()) if lidar_dataset is not None: # to deal with asynchronized data # find the closest lidar scan to the current image in time closest_lidar_idx = self.lidar_source.find_closest_timestep( data_dict["normed_timestamps"].flatten()[0] ) data_dict = lidar_dataset[closest_lidar_idx] lidar_points = ( data_dict["lidar_origins"] + data_dict["lidar_ranges"] * data_dict["lidar_viewdirs"] ) # project lidar points to the image plane # TODO: consider making this a function intrinsic_4x4 = torch.nn.functional.pad( self.pixel_source.intrinsics[i], (0, 1, 0, 1) ) intrinsic_4x4[3, 3] = 1.0 lidar2img = intrinsic_4x4 @ self.pixel_source.cam_to_worlds[i].inverse() lidar_points = ( lidar2img[:3, :3] @ lidar_points.T + lidar2img[:3, 3:4] ).T depth = lidar_points[:, 2] cam_points = lidar_points[:, :2] / (depth.unsqueeze(-1) + 1e-6) valid_mask = ( (cam_points[:, 0] >= 0) & (cam_points[:, 0] < self.pixel_source.WIDTH) & (cam_points[:, 1] >= 0) & (cam_points[:, 1] < self.pixel_source.HEIGHT) & (depth > 0) ) depth = depth[valid_mask] _cam_points = cam_points[valid_mask] depth_map = torch.zeros( self.pixel_source.HEIGHT, self.pixel_source.WIDTH ).to(self.device) depth_map[ _cam_points[:, 1].long(), _cam_points[:, 0].long() ] = depth.squeeze(-1) depth_img = depth_map.cpu().numpy() depth_img = depth_visualizer(depth_img, depth_img > 0) mask = (depth_map.unsqueeze(-1) > 0).cpu().numpy() # show the depth map on top of the rgb image image = rgb_imgs[-1] * (1 - mask) + depth_img * mask lidar_depths.append(image) # project lidar flows to the image plane flow_img = torch.zeros( self.pixel_source.HEIGHT, self.pixel_source.WIDTH, 3 ).to(self.device) # to examine whether the ground labels are correct valid_mask = valid_mask & (~data_dict["lidar_ground"]) _cam_points = cam_points[valid_mask] # final color: # white if no flow, black if ground, and flow color otherwise
flow_color = scene_flow_to_rgb(
5
2023-10-11 20:56:27+00:00
24k
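
The record that ends here and the one that starts below share the same row layout: a source repository and file path, a list of retrieved context snippets (each a dict with "identifier", "path" and "snippet" keys), the file's code, what appears to be the ground-truth next line (here `flow_color = scene_flow_to_rgb(`), and a few scalar values such as a creation timestamp. The following is a minimal, hypothetical sketch of how rows like these could be consumed for next-line prediction; the file name "rows.jsonl" and the field names used below ("context", "cropped_code", "next_line") are assumptions for illustration only, not a documented schema.

    import json
    from typing import Dict, List

    def build_prompt(row: Dict) -> str:
        """Concatenate the retrieved context snippets and the in-file code prefix."""
        parts: List[str] = []
        for ctx in row.get("context", []):
            # each context entry carries "identifier", "path" and "snippet" keys,
            # matching the dicts shown in the records above and below
            parts.append(f"# {ctx['identifier']} ({ctx['path']})\n{ctx['snippet']}")
        parts.append(row["cropped_code"])  # assumed field name for the code prefix
        return "\n\n".join(parts)

    def exact_match(prediction: str, row: Dict) -> bool:
        """Whitespace-insensitive comparison against the gold next line."""
        return prediction.strip() == row["next_line"].strip()  # assumed field name

    if __name__ == "__main__":
        # "rows.jsonl" is a hypothetical local export of rows like the ones shown here
        with open("rows.jsonl") as f:
            rows = [json.loads(line) for line in f]
        print(build_prompt(rows[0])[:200])
        print(exact_match("flow_color = scene_flow_to_rgb(", rows[0]))
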
alibaba-damo-academy/FunCodec
funcodec/models/encoder/transformer_encoder.py
[ { "identifier": "AbsEncoder", "path": "funcodec/models/encoder/abs_encoder.py", "snippet": "class AbsEncoder(torch.nn.Module, ABC):\n @abstractmethod\n def output_size(self) -> int:\n raise NotImplementedError\n\n @abstractmethod\n def forward(\n self,\n xs_pad: torch.Tensor,\n ilens: torch.Tensor,\n prev_states: torch.Tensor = None,\n ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:\n raise NotImplementedError" }, { "identifier": "MultiHeadedAttention", "path": "funcodec/modules/attention.py", "snippet": "class MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super(MultiHeadedAttention, self).__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout_rate)\n\n def forward_qkv(self, query, key, value):\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n Returns:\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\n\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n\n return q, k, v\n\n def forward_attention(self, value, scores, mask):\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n if mask is not None:\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n min_value = float(\n numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min\n )\n scores = scores.masked_fill(mask, min_value)\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n else:\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(self.attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(self, query, key, value, mask):\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): 
Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n return self.forward_attention(v, scores, mask)" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "funcodec/modules/attention.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding (new implementation).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n Paper: https://arxiv.org/abs/1901.02860\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n self.zero_triu = zero_triu\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).\n time1 means the length of query vector.\n\n Returns:\n torch.Tensor: Output tensor.\n\n \"\"\"\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)[\n :, :, :, : x.size(-1) // 2 + 1\n ] # only keep the positions from 0 to time2\n\n if self.zero_triu:\n ones = torch.ones((x.size(2), x.size(3)), device=x.device)\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward(self, query, key, value, pos_emb, mask):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n pos_emb (torch.Tensor): Positional embedding tensor\n (#batch, 2*time1-1, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, 2*time1-1)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask)" }, { "identifier": "LegacyRelPositionMultiHeadedAttention", "path": "funcodec/modules/attention.py", "snippet": "class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding (old version).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n Paper: https://arxiv.org/abs/1901.02860\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n self.zero_triu = zero_triu\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x):\n \"\"\"Compute relative positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, head, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor.\n\n \"\"\"\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)\n\n if self.zero_triu:\n ones = torch.ones((x.size(2), x.size(3)))\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward(self, query, key, value, pos_emb, mask):\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, time1)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask)" }, { "identifier": "LayerNorm", "path": "funcodec/modules/layer_norm.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\n \"\"\"Layer normalization module.\n\n Args:\n nout (int): Output dim size.\n dim (int): Dimension to be normalized.\n\n \"\"\"\n\n def __init__(self, nout, dim=-1):\n \"\"\"Construct an LayerNorm object.\"\"\"\n super(LayerNorm, self).__init__(nout, eps=1e-12)\n self.dim = dim\n\n def forward(self, x):\n \"\"\"Apply layer normalization.\n\n Args:\n x (torch.Tensor): Input tensor.\n\n Returns:\n torch.Tensor: Normalized tensor.\n\n \"\"\"\n if self.dim == -1:\n return super(LayerNorm, self).forward(x)\n return (\n super(LayerNorm, self)\n .forward(x.transpose(self.dim, -1))\n .transpose(self.dim, -1)\n )" }, { "identifier": "Conv1dLinear", "path": "funcodec/modules/multi_layer_conv.py", "snippet": "class Conv1dLinear(torch.nn.Module):\n \"\"\"Conv1D + Linear for Transformer block.\n\n A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.\n\n \"\"\"\n\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\n \"\"\"Initialize Conv1dLinear module.\n\n Args:\n in_chans (int): Number of input channels.\n hidden_chans (int): Number of hidden channels.\n kernel_size (int): Kernel size of conv1d.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n super(Conv1dLinear, self).__init__()\n self.w_1 = torch.nn.Conv1d(\n in_chans,\n hidden_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n )\n self.w_2 = torch.nn.Linear(hidden_chans, in_chans)\n self.dropout = torch.nn.Dropout(dropout_rate)\n\n def forward(self, x):\n \"\"\"Calculate forward propagation.\n\n Args:\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\n\n Returns:\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\n\n \"\"\"\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\n return self.w_2(self.dropout(x))" }, { "identifier": "MultiLayeredConv1d", "path": "funcodec/modules/multi_layer_conv.py", "snippet": "class MultiLayeredConv1d(torch.nn.Module):\n \"\"\"Multi-layered conv1d for Transformer block.\n\n This is a module of 
multi-leyered conv1d designed\n to replace positionwise feed-forward network\n in Transforner block, which is introduced in\n `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\n\n .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:\n https://arxiv.org/pdf/1905.09263.pdf\n\n \"\"\"\n\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\n \"\"\"Initialize MultiLayeredConv1d module.\n\n Args:\n in_chans (int): Number of input channels.\n hidden_chans (int): Number of hidden channels.\n kernel_size (int): Kernel size of conv1d.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n super(MultiLayeredConv1d, self).__init__()\n self.w_1 = torch.nn.Conv1d(\n in_chans,\n hidden_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n )\n self.w_2 = torch.nn.Conv1d(\n hidden_chans,\n in_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n )\n self.dropout = torch.nn.Dropout(dropout_rate)\n\n def forward(self, x):\n \"\"\"Calculate forward propagation.\n\n Args:\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\n\n Returns:\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\n\n \"\"\"\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\n return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)" }, { "identifier": "make_pad_mask", "path": "funcodec/modules/nets_utils.py", "snippet": "def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):\n \"\"\"Make mask tensor containing indices of padded part.\n\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor.\n If set, masks will be the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor.\n See the example.\n\n Returns:\n Tensor: Mask tensor containing indices of padded part.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n Examples:\n With only lengths.\n\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n\n With the reference tensor.\n\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 1],\n [0, 0, 0, 1]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n With the reference tensor and dimension indicator.\n\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_pad_mask(lengths, xs, 1)\n tensor([[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)\n >>> make_pad_mask(lengths, xs, 2)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 
0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n \"\"\"\n if length_dim == 0:\n raise ValueError(\"length_dim cannot be 0: {}\".format(length_dim))\n\n if not isinstance(lengths, list):\n lengths = lengths.tolist()\n bs = int(len(lengths))\n if maxlen is None:\n if xs is None:\n maxlen = int(max(lengths))\n else:\n maxlen = xs.size(length_dim)\n else:\n assert xs is None\n assert maxlen >= int(max(lengths))\n\n seq_range = torch.arange(0, maxlen, dtype=torch.int64)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)\n seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n\n if xs is not None:\n assert xs.size(0) == bs, (xs.size(0), bs)\n\n if length_dim < 0:\n length_dim = xs.dim() + length_dim\n # ind = (:, None, ..., None, :, , None, ..., None)\n ind = tuple(\n slice(None) if i in (0, length_dim) else None for i in range(xs.dim())\n )\n mask = mask[ind].expand_as(xs).to(xs.device)\n return mask" }, { "identifier": "PositionalEncoding", "path": "funcodec/modules/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\n \"\"\"Positional encoding.\n\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n reverse (bool): Whether to reverse the input position. Only for\n the class LegacyRelPositionalEncoding. We remove it in the current\n class RelPositionalEncoding.\n \"\"\"\n\n def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):\n \"\"\"Construct an PositionalEncoding object.\"\"\"\n super(PositionalEncoding, self).__init__()\n self.d_model = d_model\n self.reverse = reverse\n self.xscale = math.sqrt(self.d_model)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.pe = None\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\n self._register_load_state_dict_pre_hook(_pre_hook)\n\n def extend_pe(self, x):\n \"\"\"Reset the positional encodings.\"\"\"\n if self.pe is not None:\n if self.pe.size(1) >= x.size(1):\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\n return\n pe = torch.zeros(x.size(1), self.d_model)\n if self.reverse:\n position = torch.arange(\n x.size(1) - 1, -1, -1.0, dtype=torch.float32\n ).unsqueeze(1)\n else:\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\n * -(math.log(10000.0) / self.d_model)\n )\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.pe = pe.to(device=x.device, dtype=x.dtype)\n\n def forward(self, x: torch.Tensor):\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n \"\"\"\n self.extend_pe(x)\n x = x * self.xscale + self.pe[:, : x.size(1)]\n return self.dropout(x)" }, { "identifier": "ScaledPositionalEncoding", "path": "funcodec/modules/embedding.py", "snippet": "class ScaledPositionalEncoding(PositionalEncoding):\n \"\"\"Scaled positional encoding module.\n\n See Sec. 
3.2 https://arxiv.org/abs/1809.08895\n\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n\n \"\"\"\n\n def __init__(self, d_model, dropout_rate, max_len=5000):\n \"\"\"Initialize class.\"\"\"\n super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)\n self.alpha = torch.nn.Parameter(torch.tensor(1.0))\n\n def reset_parameters(self):\n \"\"\"Reset parameters.\"\"\"\n self.alpha.data = torch.tensor(1.0)\n\n def forward(self, x):\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n\n \"\"\"\n self.extend_pe(x)\n x = x + self.alpha * self.pe[:, : x.size(1)]\n return self.dropout(x)" }, { "identifier": "RelPositionalEncoding", "path": "funcodec/modules/embedding.py", "snippet": "class RelPositionalEncoding(torch.nn.Module):\n \"\"\"Relative positional encoding module (new implementation).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n See : Appendix B in https://arxiv.org/abs/1901.02860\n\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n\n \"\"\"\n\n def __init__(self, d_model, dropout_rate, max_len=5000):\n \"\"\"Construct an PositionalEncoding object.\"\"\"\n super(RelPositionalEncoding, self).__init__()\n self.d_model = d_model\n self.xscale = math.sqrt(self.d_model)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.pe = None\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\n\n def extend_pe(self, x):\n \"\"\"Reset the positional encodings.\"\"\"\n if self.pe is not None:\n # self.pe contains both positive and negative parts\n # the length of self.pe is 2 * input_len - 1\n if self.pe.size(1) >= x.size(1) * 2 - 1:\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\n return\n # Suppose `i` means to the position of query vecotr and `j` means the\n # position of key vector. We use position relative positions when keys\n # are to the left (i>j) and negative relative positions otherwise (i<j).\n pe_positive = torch.zeros(x.size(1), self.d_model)\n pe_negative = torch.zeros(x.size(1), self.d_model)\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\n * -(math.log(10000.0) / self.d_model)\n )\n pe_positive[:, 0::2] = torch.sin(position * div_term)\n pe_positive[:, 1::2] = torch.cos(position * div_term)\n pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)\n pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)\n\n # Reserve the order of positive indices and concat both positive and\n # negative indices. 
This is used to support the shifting trick\n # as in https://arxiv.org/abs/1901.02860\n pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)\n pe_negative = pe_negative[1:].unsqueeze(0)\n pe = torch.cat([pe_positive, pe_negative], dim=1)\n self.pe = pe.to(device=x.device, dtype=x.dtype)\n\n def forward(self, x: torch.Tensor):\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n\n \"\"\"\n self.extend_pe(x)\n x = x * self.xscale\n pos_emb = self.pe[\n :,\n self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),\n ]\n return self.dropout(x), self.dropout(pos_emb)" }, { "identifier": "LegacyRelPositionalEncoding", "path": "funcodec/modules/embedding.py", "snippet": "class LegacyRelPositionalEncoding(PositionalEncoding):\n \"\"\"Relative positional encoding module (old version).\n\n Details can be found in https://github.com/espnet/espnet/pull/2816.\n\n See : Appendix B in https://arxiv.org/abs/1901.02860\n\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n\n \"\"\"\n\n def __init__(self, d_model, dropout_rate, max_len=5000):\n \"\"\"Initialize class.\"\"\"\n super().__init__(\n d_model=d_model,\n dropout_rate=dropout_rate,\n max_len=max_len,\n reverse=True,\n )\n\n def forward(self, x):\n \"\"\"Compute positional encoding.\n\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n torch.Tensor: Positional embedding tensor (1, time, `*`).\n\n \"\"\"\n self.extend_pe(x)\n x = x * self.xscale\n pos_emb = self.pe[:, : x.size(1)]\n return self.dropout(x), self.dropout(pos_emb)" }, { "identifier": "PositionwiseFeedForward", "path": "funcodec/modules/positionwise_feed_forward.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\n \"\"\"Positionwise feed forward layer.\n\n Args:\n idim (int): Input dimenstion.\n hidden_units (int): The number of hidden units.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):\n \"\"\"Construct an PositionwiseFeedForward object.\"\"\"\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = torch.nn.Linear(idim, hidden_units)\n self.w_2 = torch.nn.Linear(hidden_units, idim)\n self.dropout = torch.nn.Dropout(dropout_rate)\n self.activation = activation\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n return self.w_2(self.dropout(self.activation(self.w_1(x))))" }, { "identifier": "repeat", "path": "funcodec/modules/repeat.py", "snippet": "def repeat(N, fn):\n \"\"\"Repeat module N times.\n\n Args:\n N (int): Number of repeat time.\n fn (Callable): Function to generate module.\n\n Returns:\n MultiSequential: Repeated model instance.\n\n \"\"\"\n return MultiSequential(*[fn(n) for n in range(N)])" }, { "identifier": "rename_state_dict", "path": "funcodec/modules/nets_utils.py", "snippet": "def rename_state_dict(\n old_prefix: str, new_prefix: str, state_dict: Dict[str, torch.Tensor]\n):\n \"\"\"Replace keys of old prefix with new prefix in state dict.\"\"\"\n # need this list not to break the dict iterator\n old_keys = [k for k in state_dict if k.startswith(old_prefix)]\n if len(old_keys) > 0:\n logging.warning(f\"Rename: {old_prefix} -> {new_prefix}\")\n for k in old_keys:\n v = state_dict.pop(k)\n new_k = k.replace(old_prefix, new_prefix)\n state_dict[new_k] = v" }, { "identifier": 
"DynamicConvolution", "path": "funcodec/modules/dynamic_conv.py", "snippet": "class DynamicConvolution(nn.Module):\n \"\"\"Dynamic Convolution layer.\n\n This implementation is based on\n https://github.com/pytorch/fairseq/tree/master/fairseq\n\n Args:\n wshare (int): the number of kernel of convolution\n n_feat (int): the number of features\n dropout_rate (float): dropout_rate\n kernel_size (int): kernel size (length)\n use_kernel_mask (bool): Use causal mask or not for convolution kernel\n use_bias (bool): Use bias term or not.\n\n \"\"\"\n\n def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n \"\"\"Construct Dynamic Convolution layer.\"\"\"\n super(DynamicConvolution, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.attn = None\n\n # linear -> GLU -- -> lightconv -> linear\n # \\ /\n # Linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat, n_feat)\n self.linear_weight = nn.Linear(n_feat, self.wshare * 1 * kernel_size)\n nn.init.xavier_uniform(self.linear_weight.weight)\n self.act = nn.GLU()\n\n # dynamic conv related\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n def forward(self, query, key, value, mask):\n \"\"\"Forward of 'Dynamic Convolution'.\n\n This function takes query, key and value but uses only quert.\n This is just for compatibility with self-attention layer (attention.py)\n\n Args:\n query (torch.Tensor): (batch, time1, d_model) input tensor\n key (torch.Tensor): (batch, time2, d_model) NOT USED\n value (torch.Tensor): (batch, time2, d_model) NOT USED\n mask (torch.Tensor): (batch, time1, time2) mask\n\n Return:\n x (torch.Tensor): (batch, time1, d_model) output\n\n \"\"\"\n # linear -> GLU -- -> lightconv -> linear\n # \\ /\n # Linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n k = self.kernel_size\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # get kernel of convolution\n weight = self.linear_weight(x) # B x T x kH\n weight = F.dropout(weight, self.dropout_rate, training=self.training)\n weight = weight.view(B, T, H, k).transpose(1, 2).contiguous() # B x H x T x k\n weight_new = torch.zeros(B * H * T * (T + k - 1), dtype=weight.dtype)\n weight_new = weight_new.view(B, H, T, T + k - 1).fill_(float(\"-inf\"))\n weight_new = weight_new.to(x.device) # B x H x T x T+k-1\n weight_new.as_strided(\n (B, H, T, k), ((T + k - 1) * T * H, (T + k - 1) * T, T + k, 1)\n ).copy_(weight)\n weight_new = weight_new.narrow(-1, int((k - 1) / 2), T) # B x H x T x T(k)\n if self.use_kernel_mask:\n kernel_mask = torch.tril(torch.ones(T, T, device=x.device)).unsqueeze(0)\n weight_new = weight_new.masked_fill(kernel_mask == 0.0, float(\"-inf\"))\n weight_new = F.softmax(weight_new, dim=-1)\n self.attn = weight_new\n weight_new = weight_new.view(B * H, T, T)\n\n # convolution\n x = x.transpose(1, 2).contiguous() # B x C x T\n x = x.view(B * H, int(C / H), T).transpose(1, 2)\n x = torch.bmm(weight_new, x) # BH x T x C/H\n x = x.transpose(1, 2).contiguous().view(B, C, T)\n\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask == 0, 0.0)\n\n # second linear layer\n x = self.linear2(x)\n return x" }, { 
"identifier": "DynamicConvolution2D", "path": "funcodec/modules/dynamic_conv2d.py", "snippet": "class DynamicConvolution2D(nn.Module):\n \"\"\"Dynamic 2-Dimensional Convolution layer.\n\n This implementation is based on\n https://github.com/pytorch/fairseq/tree/master/fairseq\n\n Args:\n wshare (int): the number of kernel of convolution\n n_feat (int): the number of features\n dropout_rate (float): dropout_rate\n kernel_size (int): kernel size (length)\n use_kernel_mask (bool): Use causal mask or not for convolution kernel\n use_bias (bool): Use bias term or not.\n\n \"\"\"\n\n def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n \"\"\"Construct Dynamic 2-Dimensional Convolution layer.\"\"\"\n super(DynamicConvolution2D, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n self.attn_t = None\n self.attn_f = None\n\n # linear -> GLU -- -> lightconv -> linear\n # \\ /\n # Linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat * 2, n_feat)\n self.linear_weight = nn.Linear(n_feat, self.wshare * 1 * kernel_size)\n nn.init.xavier_uniform(self.linear_weight.weight)\n self.linear_weight_f = nn.Linear(n_feat, kernel_size)\n nn.init.xavier_uniform(self.linear_weight_f.weight)\n self.act = nn.GLU()\n\n # dynamic conv related\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n def forward(self, query, key, value, mask):\n \"\"\"Forward of 'Dynamic 2-Dimensional Convolution'.\n\n This function takes query, key and value but uses only query.\n This is just for compatibility with self-attention layer (attention.py)\n\n Args:\n query (torch.Tensor): (batch, time1, d_model) input tensor\n key (torch.Tensor): (batch, time2, d_model) NOT USED\n value (torch.Tensor): (batch, time2, d_model) NOT USED\n mask (torch.Tensor): (batch, time1, time2) mask\n\n Return:\n x (torch.Tensor): (batch, time1, d_model) output\n\n \"\"\"\n # linear -> GLU -- -> lightconv -> linear\n # \\ /\n # Linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n k = self.kernel_size\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # convolution of frequency axis\n weight_f = self.linear_weight_f(x).view(B * T, 1, k) # B x T x k\n self.attn_f = weight_f.view(B, T, k).unsqueeze(1)\n xf = F.conv1d(\n x.view(1, B * T, C), weight_f, padding=self.padding_size, groups=B * T\n )\n xf = xf.view(B, T, C)\n\n # get kernel of convolution\n weight = self.linear_weight(x) # B x T x kH\n weight = F.dropout(weight, self.dropout_rate, training=self.training)\n weight = weight.view(B, T, H, k).transpose(1, 2).contiguous() # B x H x T x k\n weight_new = torch.zeros(B * H * T * (T + k - 1), dtype=weight.dtype)\n weight_new = weight_new.view(B, H, T, T + k - 1).fill_(float(\"-inf\"))\n weight_new = weight_new.to(x.device) # B x H x T x T+k-1\n weight_new.as_strided(\n (B, H, T, k), ((T + k - 1) * T * H, (T + k - 1) * T, T + k, 1)\n ).copy_(weight)\n weight_new = weight_new.narrow(-1, int((k - 1) / 2), T) # B x H x T x T(k)\n if self.use_kernel_mask:\n kernel_mask = torch.tril(torch.ones(T, T, device=x.device)).unsqueeze(0)\n weight_new = weight_new.masked_fill(kernel_mask == 0.0, float(\"-inf\"))\n weight_new = F.softmax(weight_new, dim=-1)\n self.attn_t = weight_new\n weight_new = weight_new.view(B 
* H, T, T)\n\n # convolution\n x = x.transpose(1, 2).contiguous() # B x C x T\n x = x.view(B * H, int(C / H), T).transpose(1, 2)\n x = torch.bmm(weight_new, x)\n x = x.transpose(1, 2).contiguous().view(B, C, T)\n\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n x = torch.cat((x, xf), -1) # B x T x Cx2\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask == 0, 0.0)\n\n # second linear layer\n x = self.linear2(x)\n return x" }, { "identifier": "LightweightConvolution", "path": "funcodec/modules/lightconv.py", "snippet": "class LightweightConvolution(nn.Module):\n \"\"\"Lightweight Convolution layer.\n\n This implementation is based on\n https://github.com/pytorch/fairseq/tree/master/fairseq\n\n Args:\n wshare (int): the number of kernel of convolution\n n_feat (int): the number of features\n dropout_rate (float): dropout_rate\n kernel_size (int): kernel size (length)\n use_kernel_mask (bool): Use causal mask or not for convolution kernel\n use_bias (bool): Use bias term or not.\n\n \"\"\"\n\n def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n \"\"\"Construct Lightweight Convolution layer.\"\"\"\n super(LightweightConvolution, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n\n # linear -> GLU -> lightconv -> linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat, n_feat)\n self.act = nn.GLU()\n\n # lightconv related\n self.weight = nn.Parameter(\n torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)\n )\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n # mask of kernel\n kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))\n kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))\n self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)\n\n def forward(self, query, key, value, mask):\n \"\"\"Forward of 'Lightweight Convolution'.\n\n This function takes query, key and value but uses only query.\n This is just for compatibility with self-attention layer (attention.py)\n\n Args:\n query (torch.Tensor): (batch, time1, d_model) input tensor\n key (torch.Tensor): (batch, time2, d_model) NOT USED\n value (torch.Tensor): (batch, time2, d_model) NOT USED\n mask (torch.Tensor): (batch, time1, time2) mask\n\n Return:\n x (torch.Tensor): (batch, time1, d_model) output\n\n \"\"\"\n # linear -> GLU -> lightconv -> linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # lightconv\n x = x.transpose(1, 2).contiguous().view(-1, H, T) # B x C x T\n weight = F.dropout(self.weight, self.dropout_rate, training=self.training)\n if self.use_kernel_mask:\n self.kernel_mask = self.kernel_mask.to(x.device)\n weight = weight.masked_fill(self.kernel_mask == 0.0, float(\"-inf\"))\n weight = F.softmax(weight, dim=-1)\n x = F.conv1d(x, weight, padding=self.padding_size, groups=self.wshare).view(\n B, C, T\n )\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask == 0, 0.0)\n\n # second linear layer\n x = 
self.linear2(x)\n return x" }, { "identifier": "LightweightConvolution2D", "path": "funcodec/modules/lightconv2d.py", "snippet": "class LightweightConvolution2D(nn.Module):\n \"\"\"Lightweight 2-Dimensional Convolution layer.\n\n This implementation is based on\n https://github.com/pytorch/fairseq/tree/master/fairseq\n\n Args:\n wshare (int): the number of kernel of convolution\n n_feat (int): the number of features\n dropout_rate (float): dropout_rate\n kernel_size (int): kernel size (length)\n use_kernel_mask (bool): Use causal mask or not for convolution kernel\n use_bias (bool): Use bias term or not.\n\n \"\"\"\n\n def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n \"\"\"Construct Lightweight 2-Dimensional Convolution layer.\"\"\"\n super(LightweightConvolution2D, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n\n # linear -> GLU -> lightconv -> linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat * 2, n_feat)\n self.act = nn.GLU()\n\n # lightconv related\n self.weight = nn.Parameter(\n torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)\n )\n self.weight_f = nn.Parameter(torch.Tensor(1, 1, kernel_size).uniform_(0, 1))\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n # mask of kernel\n kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))\n kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))\n self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)\n\n def forward(self, query, key, value, mask):\n \"\"\"Forward of 'Lightweight 2-Dimensional Convolution'.\n\n This function takes query, key and value but uses only query.\n This is just for compatibility with self-attention layer (attention.py)\n\n Args:\n query (torch.Tensor): (batch, time1, d_model) input tensor\n key (torch.Tensor): (batch, time2, d_model) NOT USED\n value (torch.Tensor): (batch, time2, d_model) NOT USED\n mask (torch.Tensor): (batch, time1, time2) mask\n\n Return:\n x (torch.Tensor): (batch, time1, d_model) output\n\n \"\"\"\n # linear -> GLU -> lightconv -> linear\n x = query\n B, T, C = x.size()\n H = self.wshare\n\n # first liner layer\n x = self.linear1(x)\n\n # GLU activation\n x = self.act(x)\n\n # convolution along frequency axis\n weight_f = F.softmax(self.weight_f, dim=-1)\n weight_f = F.dropout(weight_f, self.dropout_rate, training=self.training)\n weight_new = torch.zeros(\n B * T, 1, self.kernel_size, device=x.device, dtype=x.dtype\n ).copy_(weight_f)\n xf = F.conv1d(\n x.view(1, B * T, C), weight_new, padding=self.padding_size, groups=B * T\n ).view(B, T, C)\n\n # lightconv\n x = x.transpose(1, 2).contiguous().view(-1, H, T) # B x C x T\n weight = F.dropout(self.weight, self.dropout_rate, training=self.training)\n if self.use_kernel_mask:\n self.kernel_mask = self.kernel_mask.to(x.device)\n weight = weight.masked_fill(self.kernel_mask == 0.0, float(\"-inf\"))\n weight = F.softmax(weight, dim=-1)\n x = F.conv1d(x, weight, padding=self.padding_size, groups=self.wshare).view(\n B, C, T\n )\n if self.use_bias:\n x = x + self.bias.view(1, -1, 1)\n x = x.transpose(1, 2) # B x T x C\n x = torch.cat((x, xf), -1) # B x T x Cx2\n\n if mask is not None and not self.use_kernel_mask:\n mask = mask.transpose(-1, -2)\n x = x.masked_fill(mask 
== 0, 0.0)\n\n # second linear layer\n x = self.linear2(x)\n return x" }, { "identifier": "Conv2dSubsampling", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling object.\"\"\"\n super(Conv2dSubsampling, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 4.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 4.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-2:2]\n\n def __getitem__(self, key):\n \"\"\"Get item.\n\n When reset_parameters() is called, if use_scaled_pos_enc is used,\n return the positioning encoding.\n\n \"\"\"\n if key != -1:\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\n return self.out[key]" }, { "identifier": "Conv2dSubsampling2", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling2(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/2 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling2 object.\"\"\"\n super(Conv2dSubsampling2, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 1),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 2)), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 2.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 2.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-2:1]\n\n def __getitem__(self, key):\n \"\"\"Get item.\n\n When reset_parameters() is called, if use_scaled_pos_enc is used,\n return the positioning encoding.\n\n \"\"\"\n if key != -1:\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\n return self.out[key]" }, { "identifier": 
"Conv2dSubsampling6", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling6(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/6 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling6 object.\"\"\"\n super(Conv2dSubsampling6, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 5, 3),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 6.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 6.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-4:3]" }, { "identifier": "Conv2dSubsampling8", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling8(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/8 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling8 object.\"\"\"\n super(Conv2dSubsampling8, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 8.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 8.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-2:2][:, :, :-2:2]" }, { "identifier": "TooShortUttError", "path": "funcodec/modules/subsampling.py", "snippet": "class TooShortUttError(Exception):\n \"\"\"Raised when the utt is too short for subsampling.\n\n Args:\n message (str): Message for error catch\n actual_size (int): the short size that cannot pass the subsampling\n limit (int): the limit size for subsampling\n\n \"\"\"\n\n def __init__(self, message, actual_size, limit):\n \"\"\"Construct a TooShortUttError for error handler.\"\"\"\n super().__init__(message)\n self.actual_size = actual_size\n self.limit = limit" }, { "identifier": 
"check_short_utt", "path": "funcodec/modules/subsampling.py", "snippet": "def check_short_utt(ins, size):\n \"\"\"Check if the utterance is too short for subsampling.\"\"\"\n if isinstance(ins, Conv2dSubsampling2) and size < 3:\n return True, 3\n if isinstance(ins, Conv2dSubsampling) and size < 7:\n return True, 7\n if isinstance(ins, Conv2dSubsampling6) and size < 11:\n return True, 11\n if isinstance(ins, Conv2dSubsampling8) and size < 15:\n return True, 15\n return False, -1" } ]
from typing import List from typing import Optional from typing import Tuple from torch import nn from funcodec.models.encoder.abs_encoder import AbsEncoder from funcodec.modules.attention import ( MultiHeadedAttention, RelPositionMultiHeadedAttention, # noqa: H301 LegacyRelPositionMultiHeadedAttention, # noqa: H301 ) from funcodec.modules.layer_norm import LayerNorm from funcodec.modules.multi_layer_conv import Conv1dLinear from funcodec.modules.multi_layer_conv import MultiLayeredConv1d from funcodec.modules.nets_utils import make_pad_mask from funcodec.modules.embedding import ( PositionalEncoding, # noqa: H301 ScaledPositionalEncoding, # noqa: H301 RelPositionalEncoding, # noqa: H301 LegacyRelPositionalEncoding, # noqa: H301 ) from funcodec.modules.positionwise_feed_forward import ( PositionwiseFeedForward, # noqa: H301 ) from funcodec.modules.repeat import repeat from funcodec.modules.nets_utils import rename_state_dict from funcodec.modules.dynamic_conv import DynamicConvolution from funcodec.modules.dynamic_conv2d import DynamicConvolution2D from funcodec.modules.lightconv import LightweightConvolution from funcodec.modules.lightconv2d import LightweightConvolution2D from funcodec.modules.subsampling import Conv2dSubsampling from funcodec.modules.subsampling import Conv2dSubsampling2 from funcodec.modules.subsampling import Conv2dSubsampling6 from funcodec.modules.subsampling import Conv2dSubsampling8 from funcodec.modules.subsampling import TooShortUttError from funcodec.modules.subsampling import check_short_utt import torch import logging
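Among the modules imported above, LightweightConvolution (its snippet appears earlier in this record) reduces to softmax-normalizing a small bank of shared kernels and running them as a depthwise grouped conv1d over time. A minimal sketch of just that core step, with made-up sizes and random weights in place of the module's learned parameters:

import torch
import torch.nn.functional as F

B, T, C, H, k = 2, 10, 8, 4, 3                        # batch, time, features, wshare, kernel size
x = torch.randn(B, T, C)
weight = torch.softmax(torch.randn(H, 1, k), dim=-1)  # H shared kernels, normalized over taps

xt = x.transpose(1, 2).contiguous().view(-1, H, T)    # (B * C / H, H, T)
y = F.conv1d(xt, weight, padding=k // 2, groups=H)    # depthwise convolution along time
y = y.view(B, C, T).transpose(1, 2)
print(y.shape)  # torch.Size([2, 10, 8])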
18,455
if not self.normalize_before: x = self.norm2(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class TransformerEncoder(AbsEncoder): """Transformer encoder module. Args: input_size: input dim output_size: dimension of attention attention_heads: the number of heads of multi head attention linear_units: the number of units of position-wise feed forward num_blocks: the number of decoder blocks dropout_rate: dropout rate attention_dropout_rate: dropout rate in attention positional_dropout_rate: dropout rate after adding positional encoding input_layer: input layer type pos_enc_class: PositionalEncoding or ScaledPositionalEncoding normalize_before: whether to use layer_norm before the first block concat_after: whether to concat attention layer's input and output if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type: linear of conv1d positionwise_conv_kernel_size: kernel size of positionwise conv1d layer padding_idx: padding_idx for input_layer=embed """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=PositionalEncoding, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, causal_mode: str = "None", ): super().__init__() self._output_size = output_size self.causal_mode = causal_mode if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6": self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate) elif input_layer == "conv2d8": self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate) elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer is None: if input_size == output_size: self.embed = None else: self.embed = torch.nn.Linear(input_size, output_size) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before if positionwise_layer_type == "linear": positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( output_size, linear_units, dropout_rate, ) elif positionwise_layer_type == "conv1d": positionwise_layer = MultiLayeredConv1d positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) elif positionwise_layer_type == "conv1d-linear": positionwise_layer = Conv1dLinear positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") self.encoders = repeat( num_blocks, lambda lnum: EncoderLayer( 
output_size,
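The concat_after flag documented in the TransformerEncoder docstring above switches the residual from x + att(x) to x + linear(concat(x, att(x))). A tiny illustration of the two variants, where the attention output and the Linear layer are stand-ins rather than the module's own:

import torch

B, T, d = 2, 5, 16
x = torch.randn(B, T, d)
att = torch.randn(B, T, d)                 # stand-in for the self-attention output
concat_linear = torch.nn.Linear(2 * d, d)

out_plain = x + att                                          # concat_after=False
out_concat = x + concat_linear(torch.cat((x, att), dim=-1))  # concat_after=True
print(out_plain.shape, out_concat.shape)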
# Copyright 2019 Shigeki Karita # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Transformer encoder definition.""" class EncoderLayer(nn.Module): """Encoder layer module. Args: size (int): Input dimension. self_attn (torch.nn.Module): Self-attention module instance. `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance can be used as the argument. feed_forward (torch.nn.Module): Feed-forward module instance. `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance can be used as the argument. dropout_rate (float): Dropout rate. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) stochastic_depth_rate (float): Proability to skip this layer. During training, the layer may skip residual computation and return input as-is with given probability. """ def __init__( self, size, self_attn, feed_forward, dropout_rate, normalize_before=True, concat_after=False, stochastic_depth_rate=0.0, ): """Construct an EncoderLayer object.""" super(EncoderLayer, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.norm1 = LayerNorm(size) self.norm2 = LayerNorm(size) self.dropout = nn.Dropout(dropout_rate) self.size = size self.normalize_before = normalize_before self.concat_after = concat_after if self.concat_after: self.concat_linear = nn.Linear(size + size, size) self.stochastic_depth_rate = stochastic_depth_rate def forward(self, x, mask, cache=None): """Compute encoded features. Args: x_input (torch.Tensor): Input tensor (#batch, time, size). mask (torch.Tensor): Mask tensor for the input (#batch, time). cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size). Returns: torch.Tensor: Output tensor (#batch, time, size). torch.Tensor: Mask tensor (#batch, time). """ if isinstance(x, tuple): x, pos_emb = x[0], x[1] else: x, pos_emb = x, None skip_layer = False # with stochastic depth, residual connection `x + f(x)` becomes # `x <- x + 1 / (1 - p) * f(x)` at training time. stoch_layer_coeff = 1.0 if self.training and self.stochastic_depth_rate > 0: skip_layer = torch.rand(1).item() < self.stochastic_depth_rate stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate) if skip_layer: if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask residual = x if self.normalize_before: x = self.norm1(x) if cache is None: x_q = x else: assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size) x_q = x[:, -1:, :] residual = residual[:, -1:, :] mask = None if mask is None else mask[:, -1:, :] if pos_emb is not None: x_att = self.self_attn(x_q, x, x, pos_emb, mask) else: x_att = self.self_attn(x_q, x, x, mask) if self.concat_after: x_concat = torch.cat((x, x_att), dim=-1) x = residual + stoch_layer_coeff * self.concat_linear(x_concat) else: x = residual + stoch_layer_coeff * self.dropout(x_att) if not self.normalize_before: x = self.norm1(x) residual = x if self.normalize_before: x = self.norm2(x) x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm2(x) if cache is not None: x = torch.cat([cache, x], dim=1) if pos_emb is not None: return (x, pos_emb), mask return x, mask class TransformerEncoder(AbsEncoder): """Transformer encoder module. 
Args: input_size: input dim output_size: dimension of attention attention_heads: the number of heads of multi head attention linear_units: the number of units of position-wise feed forward num_blocks: the number of decoder blocks dropout_rate: dropout rate attention_dropout_rate: dropout rate in attention positional_dropout_rate: dropout rate after adding positional encoding input_layer: input layer type pos_enc_class: PositionalEncoding or ScaledPositionalEncoding normalize_before: whether to use layer_norm before the first block concat_after: whether to concat attention layer's input and output if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type: linear of conv1d positionwise_conv_kernel_size: kernel size of positionwise conv1d layer padding_idx: padding_idx for input_layer=embed """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=PositionalEncoding, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, causal_mode: str = "None", ): super().__init__() self._output_size = output_size self.causal_mode = causal_mode if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6": self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate) elif input_layer == "conv2d8": self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate) elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer is None: if input_size == output_size: self.embed = None else: self.embed = torch.nn.Linear(input_size, output_size) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before if positionwise_layer_type == "linear": positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( output_size, linear_units, dropout_rate, ) elif positionwise_layer_type == "conv1d": positionwise_layer = MultiLayeredConv1d positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) elif positionwise_layer_type == "conv1d-linear": positionwise_layer = Conv1dLinear positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") self.encoders = repeat( num_blocks, lambda lnum: EncoderLayer( output_size,
MultiHeadedAttention(
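The EncoderLayer in the full source above describes stochastic depth as replacing x + f(x) with x + 1 / (1 - p) * f(x) at training time, with the whole layer skipped with probability p. A minimal sketch of that rule, using a Linear layer as a stand-in for the real attention and feed-forward sublayers:

import torch

p = 0.3                       # stochastic_depth_rate
x = torch.randn(2, 5, 16)
f = torch.nn.Linear(16, 16)   # stand-in sublayer

if torch.rand(1).item() < p:            # skip the layer this training step
    y = x
else:                                   # rescale so the expected update matches inference
    y = x + (1.0 / (1 - p)) * f(x)
print(y.shape)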
1
2023-10-07 02:00:40+00:00
24k
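Before the next record, one note on the DynamicConvolution2D snippet in this record's context: it builds a (T, T + k - 1) band matrix with as_strided, softmaxes it, and applies it with bmm so that every time step mixes its neighbours with its own predicted kernel. A simplified, hedged equivalent of that per-position mixing, using unfold instead of the band-matrix trick and random kernels in place of the ones produced by linear_weight(x):

import torch
import torch.nn.functional as F

B, T, C, k = 2, 6, 4, 3
x = torch.randn(B, T, C)
kernels = torch.softmax(torch.randn(B, T, k), dim=-1)   # one normalized kernel per position

pad = (k - 1) // 2
windows = F.pad(x, (0, 0, pad, pad)).unfold(1, k, 1)    # (B, T, C, k) sliding windows over time
y = torch.einsum("btck,btk->btc", windows, kernels)     # each window mixed by its own kernel
print(y.shape)  # torch.Size([2, 6, 4])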
Beckschen/3D-TransUNet
nn_transunet/trainer/nnUNetTrainerV2.py
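The context snippets that follow for this record include MultipleOutputLoss2, which applies a single base loss to every deep-supervision output and sums the results with per-output weights. A minimal sketch of that weighting scheme, using MSE as a stand-in for the nnU-Net Dice + cross-entropy loss (see the MultipleOutputLoss2 snippet below for the actual class):

import torch
import torch.nn.functional as F

outputs = [torch.randn(2, 3, 8, 8, 8) for _ in range(3)]  # deep-supervision heads
targets = [torch.randn_like(o) for o in outputs]
weights = [1.0, 0.5, 0.25]                                # per-output weight factors

loss = weights[0] * F.mse_loss(outputs[0], targets[0])
for i in range(1, len(outputs)):
    if weights[i] != 0:
        loss = loss + weights[i] * F.mse_loss(outputs[i], targets[i])
print(loss.item())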
[ { "identifier": "get_moreDA_augmentation", "path": "nn_transunet/data/data_augmentation_moreDA.py", "snippet": "def get_moreDA_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,\n border_val_seg=-1,\n seeds_train=None, seeds_val=None, order_seg=1, order_data=3, deep_supervision_scales=None,\n soft_ds=False,\n classes=None, pin_memory=True, regions=None,\n use_nondetMultiThreadedAugmenter: bool = False,\n is_spatial_aug_only=False, reclip=None):\n\n # default_3D_augmentation_params: {'selected_data_channels': None, 'selected_seg_channels': [0], 'do_elastic': False, 'elastic_deform_alpha': (0.0, 900.0), 'elastic_deform_sigma': (9.0, 13.0), 'p_eldef': 0.2, 'do_scaling': True, 'scale_range': (0.7, 1.4), 'independent_scale_factor_for_each_axis': False, 'p_independent_scale_per_axis': 1, 'p_scale': 0.2, 'do_rotation': True, 'rotation_x': (-0.5235987755982988, 0.5235987755982988), 'rotation_y': (-0.5235987755982988, 0.5235987755982988), 'rotation_z': (-0.5235987755982988, 0.5235987755982988), 'rotation_p_per_axis': 1, 'p_rot': 0.2, 'random_crop': False, 'random_crop_dist_to_border': None, 'do_gamma': True, 'gamma_retain_stats': True, 'gamma_range': (0.7, 1.5), 'p_gamma': 0.3, 'do_mirror': True, 'mirror_axes': (0, 1, 2), 'dummy_2D': False, 'mask_was_used_for_normalization': OrderedDict([(0, False)]), 'border_mode_data': 'constant', 'all_segmentation_labels': None, 'move_last_seg_chanel_to_data': False, 'cascade_do_cascade_augmentations': False, 'cascade_random_binary_transform_p': 0.4, 'cascade_random_binary_transform_p_per_label': 1, 'cascade_random_binary_transform_size': (1, 8), 'cascade_remove_conn_comp_p': 0.2, 'cascade_remove_conn_comp_max_size_percent_threshold': 0.15, 'cascade_remove_conn_comp_fill_with_other_class_p': 0.0, 'do_additive_brightness': False, 'additive_brightness_p_per_sample': 0.15, 'additive_brightness_p_per_channel': 0.5, 'additive_brightness_mu': 0.0, 'additive_brightness_sigma': 0.1, 'num_threads': 12, 'num_cached_per_thread': 2, 'patch_size_for_spatialtransform': [64, 128, 128]} \n\n assert params.get('mirror') is None, \"old version of params, use new keyword do_mirror\"\n\n tr_transforms = []\n\n\n if params.get(\"selected_data_channels\") is not None:\n tr_transforms.append(DataChannelSelectionTransform(params.get(\"selected_data_channels\")))\n\n if params.get(\"selected_seg_channels\") is not None:\n tr_transforms.append(SegChannelSelectionTransform(params.get(\"selected_seg_channels\")))\n\n # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!\n if params.get(\"dummy_2D\") is not None and params.get(\"dummy_2D\"):\n ignore_axes = (0,)\n tr_transforms.append(Convert3DTo2DTransform())\n patch_size_spatial = patch_size[1:]\n else:\n patch_size_spatial = patch_size\n ignore_axes = None\n\n tr_transforms.append(SpatialTransform(\n patch_size_spatial, patch_center_dist_from_border=None,\n do_elastic_deform=params.get(\"do_elastic\"), alpha=params.get(\"elastic_deform_alpha\"),\n sigma=params.get(\"elastic_deform_sigma\"),\n do_rotation=params.get(\"do_rotation\"), angle_x=params.get(\"rotation_x\"), angle_y=params.get(\"rotation_y\"),\n angle_z=params.get(\"rotation_z\"), p_rot_per_axis=params.get(\"rotation_p_per_axis\"),\n do_scale=params.get(\"do_scaling\"), scale=params.get(\"scale_range\"),\n border_mode_data=params.get(\"border_mode_data\"), border_cval_data=0, order_data=order_data,\n border_mode_seg=\"constant\", border_cval_seg=border_val_seg,\n 
order_seg=order_seg, random_crop=params.get(\"random_crop\"), p_el_per_sample=params.get(\"p_eldef\"),\n p_scale_per_sample=params.get(\"p_scale\"), p_rot_per_sample=params.get(\"p_rot\"),\n independent_scale_for_each_axis=params.get(\"independent_scale_factor_for_each_axis\")\n ))\n\n if params.get(\"dummy_2D\"):\n tr_transforms.append(Convert2DTo3DTransform())\n\n # we need to put the color augmentations after the dummy 2d part (if applicable). Otherwise the overloaded color\n # channel gets in the way\n tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) # a kind of noise transform\n tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2, p_per_channel=0.5))\n tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))\n\n if params.get(\"do_additive_brightness\"):\n tr_transforms.append(BrightnessTransform(params.get(\"additive_brightness_mu\"),\n params.get(\"additive_brightness_sigma\"),\n True, p_per_sample=params.get(\"additive_brightness_p_per_sample\"),\n p_per_channel=params.get(\"additive_brightness_p_per_channel\")))\n\n tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))\n tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,\n p_per_channel=0.5,\n order_downsample=0, order_upsample=3, p_per_sample=0.25,\n ignore_axes=ignore_axes))\n tr_transforms.append(\n GammaTransform(params.get(\"gamma_range\"), True, True, retain_stats=params.get(\"gamma_retain_stats\"),\n p_per_sample=0.1)) # inverted gamma, a kind of color transform\n\n if params.get(\"do_gamma\"):\n tr_transforms.append(\n GammaTransform(params.get(\"gamma_range\"), False, True, retain_stats=params.get(\"gamma_retain_stats\"),\n p_per_sample=params[\"p_gamma\"]))\n if params.get(\"do_mirror\") or params.get(\"mirror\"):\n tr_transforms.append(MirrorTransform(params.get(\"mirror_axes\")))\n\n if params.get(\"mask_was_used_for_normalization\") is not None:\n mask_was_used_for_normalization = params.get(\"mask_was_used_for_normalization\")\n tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))\n # Replaces all pixels in data_dict[input_key] that have value remove_label with replace_with and saves the result to data_dict[output_key]\n tr_transforms.append(RemoveLabelTransform(-1, 0))\n\n if params.get(\"move_last_seg_chanel_to_data\") is not None and params.get(\"move_last_seg_chanel_to_data\"): # only used for cascade\n print(\"only used for cascaded!\")\n raise NotImplementedError\n\n tr_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))\n\n if deep_supervision_scales is not None:\n if soft_ds:\n assert classes is not None\n tr_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))\n else:\n tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n\n tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n tr_transforms = Compose(tr_transforms)\n\n if use_nondetMultiThreadedAugmenter:\n if NonDetMultiThreadedAugmenter is None:\n raise RuntimeError('NonDetMultiThreadedAugmenter is not yet available')\n batchgenerator_train = NonDetMultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),\n params.get(\"num_cached_per_thread\"), 
seeds=seeds_train,\n pin_memory=pin_memory)\n else:\n batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),\n params.get(\"num_cached_per_thread\"),\n seeds=seeds_train, pin_memory=pin_memory)\n # batchgenerator_train = SingleThreadedAugmenter(dataloader_train, tr_transforms)\n # import IPython;IPython.embed()\n\n val_transforms = []\n val_transforms.append(RemoveLabelTransform(-1, 0))\n if params.get(\"selected_data_channels\") is not None:\n val_transforms.append(DataChannelSelectionTransform(params.get(\"selected_data_channels\")))\n if params.get(\"selected_seg_channels\") is not None:\n val_transforms.append(SegChannelSelectionTransform(params.get(\"selected_seg_channels\")))\n\n if params.get(\"move_last_seg_chanel_to_data\") is not None and params.get(\"move_last_seg_chanel_to_data\"):\n print(\"only used for cascaded!\")\n raise NotImplementedError\n # val_transforms.append(MoveSegAsOneHotToData(1, params.get(\"all_segmentation_labels\"), 'seg', 'data'))\n\n\n val_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))\n\n if deep_supervision_scales is not None:\n if soft_ds:\n assert classes is not None\n val_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))\n else:\n val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n\n val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n val_transforms = Compose(val_transforms)\n\n if use_nondetMultiThreadedAugmenter:\n if NonDetMultiThreadedAugmenter is None:\n raise RuntimeError('NonDetMultiThreadedAugmenter is not yet available')\n batchgenerator_val = NonDetMultiThreadedAugmenter(dataloader_val, val_transforms,\n max(params.get('num_threads') // 2, 1),\n params.get(\"num_cached_per_thread\"),\n seeds=seeds_val, pin_memory=pin_memory)\n else:\n batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms,\n max(params.get('num_threads') // 2, 1),\n params.get(\"num_cached_per_thread\"),\n seeds=seeds_val, pin_memory=pin_memory)\n # batchgenerator_val = SingleThreadedAugmenter(dataloader_val, val_transforms)\n return batchgenerator_train, batchgenerator_val" }, { "identifier": "MultipleOutputLoss2", "path": "nn_transunet/trainer/loss_functions.py", "snippet": "class MultipleOutputLoss2(nn.Module):\n def __init__(self, loss, weight_factors=None):\n \"\"\"\n use this if you have several outputs and ground truth (both list of same len) and the loss should be computed\n between them (x[0] and y[0], x[1] and y[1] etc)\n :param loss:\n :param weight_factors:\n \"\"\"\n super(MultipleOutputLoss2, self).__init__()\n self.weight_factors = weight_factors\n self.loss = loss\n\n def forward(self, x, y):\n assert isinstance(x, (tuple, list)), \"x must be either tuple or list\"\n assert isinstance(y, (tuple, list)), \"y must be either tuple or list\"\n if self.weight_factors is None:\n weights = [1] * len(x)\n else:\n weights = self.weight_factors\n\n l = weights[0] * self.loss(x[0], y[0])\n for i in range(1, len(x)):\n if weights[i] != 0:\n l += weights[i] * self.loss(x[i], y[i])\n return l" }, { "identifier": "maybe_to_torch", "path": "nn_transunet/trainer/network_trainer.py", "snippet": "def maybe_to_torch(d):\n if isinstance(d, list):\n d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d]\n elif not 
isinstance(d, torch.Tensor):\n d = torch.from_numpy(d).float()\n return d" }, { "identifier": "to_cuda", "path": "nn_transunet/trainer/network_trainer.py", "snippet": "def to_cuda(data, non_blocking=True, gpu_id=0):\n if isinstance(data, list):\n data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data]\n else:\n data = data.cuda(gpu_id, non_blocking=non_blocking)\n return data" }, { "identifier": "nnUNetTrainer", "path": "nn_transunet/trainer/nnUNetTrainer.py", "snippet": "class nnUNetTrainer(NetworkTrainer):\n def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,\n unpack_data=True, deterministic=True, fp16=False):\n \"\"\"\n :param deterministic:\n :param fold: can be either [0 ... 5) for cross-validation, 'all' to train on all available training data or\n None if you wish to load some checkpoint and do inference only\n :param plans_file: the pkl file generated by preprocessing. This file will determine all design choices\n :param subfolder_with_preprocessed_data: must be a subfolder of dataset_directory (just the name of the folder,\n not the entire path). This is where the preprocessed data lies that will be used for network training. We made\n this explicitly available so that differently preprocessed data can coexist and the user can choose what to use.\n Can be None if you are doing inference only.\n :param output_folder: where to store parameters, plot progress and to the validation\n :param dataset_directory: the parent directory in which the preprocessed Task data is stored. This is required\n because the split information is stored in this directory. For running prediction only this input is not\n required and may be set to None\n :param batch_dice: compute dice loss for each sample and average over all samples in the batch or pretend the\n batch is a pseudo volume?\n :param stage: The plans file may contain several stages (used for lowres / highres / pyramid). Stage must be\n specified for training:\n if stage 1 exists then stage 1 is the high resolution stage, otherwise it's 0\n :param unpack_data: if False, npz preprocessed data will not be unpacked to npy. This consumes less space but\n is considerably slower! Running unpack_data=False with 2d should never be done!\n\n IMPORTANT: If you inherit from nnUNetTrainer and the init args change then you need to redefine self.init_args\n in your init accordingly. 
Otherwise checkpoints won't load properly!\n \"\"\"\n super(nnUNetTrainer, self).__init__(deterministic, fp16)\n self.unpack_data = unpack_data\n self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,\n deterministic, fp16)\n # set through arguments from init\n self.stage = stage\n self.experiment_name = self.__class__.__name__\n self.plans_file = plans_file\n self.output_folder = output_folder\n self.dataset_directory = dataset_directory\n self.output_folder_base = self.output_folder\n self.fold = fold\n self.pin_memory = True\n\n self.plans = None\n\n # if we are running inference only then the self.dataset_directory is set (due to checkpoint loading) but it\n # irrelevant\n if self.dataset_directory is not None and isdir(self.dataset_directory):\n self.gt_niftis_folder = join(\n self.dataset_directory, \"gt_segmentations\")\n else:\n self.gt_niftis_folder = None\n\n self.folder_with_preprocessed_data = None\n\n # set in self.initialize()\n\n self.dl_tr = self.dl_val = None\n self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \\\n self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = \\\n self.net_num_pool_op_kernel_sizes = self.net_conv_kernel_sizes = None # loaded automatically from plans_file\n\n self.basic_generator_patch_size = self.data_aug_params = self.transpose_forward = self.transpose_backward = None\n\n self.batch_dice = batch_dice\n self.loss = DC_and_CE_loss(\n {'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})\n # self.loss = PartiallyCrossEntropyLoss()\n\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []\n\n self.classes = self.do_dummy_2D_aug = self.use_mask_for_norm = self.only_keep_largest_connected_component = \\\n self.min_region_size_per_class = self.min_size_per_class = None\n\n self.inference_pad_border_mode = \"constant\"\n self.inference_pad_kwargs = {'constant_values': 0}\n\n self.update_fold(fold)\n self.pad_all_sides = None\n\n self.lr_scheduler_eps = 1e-3\n self.lr_scheduler_patience = 30\n self.initial_lr = 1e-2\n # self.initial_lr = 1e-3\n self.weight_decay = 3e-5\n\n self.oversample_foreground_percent = 0.33\n\n self.conv_per_stage = None\n self.regions_class_order = None\n\n def update_fold(self, fold):\n \"\"\"\n used to swap between folds for inference (ensemble of models from cross-validation)\n DO NOT USE DURING TRAINING AS THIS WILL NOT UPDATE THE DATASET SPLIT AND THE DATA AUGMENTATION GENERATORS\n :param fold:\n :return:\n \"\"\"\n if fold is not None:\n if isinstance(fold, str):\n assert fold.startswith(\"all\"), \"if self.fold is a string then it must be \\'all\\'\"\n # assert fold == \"all\", \"if self.fold is a string then it must be \\'all\\'\"\n if self.output_folder.endswith(\"%s\" % str(self.fold)):\n self.output_folder = self.output_folder_base\n self.output_folder = join(self.output_folder, \"%s\" % str(fold))\n else:\n if self.output_folder.endswith(\"fold_%s\" % str(self.fold)):\n self.output_folder = self.output_folder_base\n self.output_folder = join(\n self.output_folder, \"fold_%s\" % str(fold))\n self.fold = fold\n\n def setup_DA_params(self):\n if self.threeD:\n self.data_aug_params = default_3D_augmentation_params\n if self.do_dummy_2D_aug:\n self.data_aug_params[\"dummy_2D\"] = True\n self.print_to_log_file(\"Using dummy2d data augmentation\")\n self.data_aug_params[\"elastic_deform_alpha\"] = \\\n 
default_2D_augmentation_params[\"elastic_deform_alpha\"]\n self.data_aug_params[\"elastic_deform_sigma\"] = \\\n default_2D_augmentation_params[\"elastic_deform_sigma\"]\n self.data_aug_params[\"rotation_x\"] = default_2D_augmentation_params[\"rotation_x\"]\n else:\n self.do_dummy_2D_aug = False\n if max(self.patch_size) / min(self.patch_size) > 1.5:\n default_2D_augmentation_params['rotation_x'] = (\n -15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)\n self.data_aug_params = default_2D_augmentation_params\n self.data_aug_params[\"mask_was_used_for_normalization\"] = self.use_mask_for_norm\n\n if self.do_dummy_2D_aug:\n self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],\n self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n self.basic_generator_patch_size = np.array(\n [self.patch_size[0]] + list(self.basic_generator_patch_size))\n else:\n self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n\n self.data_aug_params['selected_seg_channels'] = [0]\n self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size\n\n def initialize(self, training=True, force_load_plans=False):\n \"\"\"\n For prediction of test cases just set training=False, this will prevent loading of training data and\n training batchgenerator initialization\n :param training:\n :return:\n \"\"\"\n\n maybe_mkdir_p(self.output_folder)\n\n if force_load_plans or (self.plans is None):\n self.load_plans_file()\n \n self.process_plans(self.plans)\n\n self.setup_DA_params()\n\n if training:\n self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +\n \"_stage%d\" % self.stage)\n\n self.dl_tr, self.dl_val = self.get_basic_generators()\n if self.unpack_data:\n self.print_to_log_file(\"unpacking dataset\")\n unpack_dataset(self.folder_with_preprocessed_data)\n self.print_to_log_file(\"done\")\n else:\n self.print_to_log_file(\n \"INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you \"\n \"will wait all winter for your model to finish!\")\n \n self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val,\n self.data_aug_params[\n 'patch_size_for_spatialtransform'],\n self.data_aug_params)\n self.print_to_log_file(\"TRAINING KEYS:\\n %s\" % (str(self.dataset_tr.keys())),\n also_print_to_console=False)\n self.print_to_log_file(\"VALIDATION KEYS:\\n %s\" % (str(self.dataset_val.keys())),\n also_print_to_console=False)\n else:\n pass\n self.initialize_network()\n self.initialize_optimizer_and_scheduler()\n # assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))\n self.was_initialized = True\n\n def initialize_network(self):\n \"\"\"\n This is specific to the U-Net and must be adapted for other network architectures\n :return:\n \"\"\"\n # self.print_to_log_file(self.net_num_pool_op_kernel_sizes)\n # self.print_to_log_file(self.net_conv_kernel_sizes)\n\n net_numpool = len(self.net_num_pool_op_kernel_sizes)\n if self.threeD:\n conv_op = nn.Conv3d\n dropout_op = nn.Dropout3d\n norm_op = nn.InstanceNorm3d\n else:\n conv_op = nn.Conv2d\n dropout_op = nn.Dropout2d\n norm_op = nn.InstanceNorm2d\n norm_op_kwargs = {'eps': 1e-5, 'affine': True}\n dropout_op_kwargs = {'p': 0, 'inplace': True}\n net_nonlin = nn.LeakyReLU\n net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, net_numpool,\n self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,\n dropout_op_kwargs,\n net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(\n 1e-2),\n self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)\n\n # self.network.inference_apply_nonlin = softmax_helper\n # self.network = UNet(self.num_input_channels, self.num_classes)\n # self.network = smp.Unet(encoder_name='resnet50', encoder_weights='imagenet',\n # in_channels=self.num_input_channels, classes=self.num_classes)\n # self.network = smp.DeepLabV3Plus(encoder_name='resnet50', encoder_weights='imagenet',\n # in_channels=self.num_input_channels, classes=self.num_classes)\n # self.network = Attention_UNet(feature_scale=2, n_classes=self.num_classes, is_deconv=True, in_channels=self.num_input_channels)\n # self.network = VNet(n_channels=self.num_input_channels, n_classes=self.num_classes)\n # self.network = NestedUNet(num_classes=self.num_classes, input_channels=self.num_input_channels)\n if torch.cuda.is_available():\n self.network.cuda()\n # checkpoint = torch.load(\"/mnt/lustre/luoxiangde.vendor/projects/nnUNetFrame/DATASET/pCE_model_latest.model\")\n # print(\"Load Weighted Successful\")\n # weights = checkpoint['state_dict']\n # self.network.load_state_dict(weights, strict=False)\n # self.network.half()\n\n # def initialize_optimizer_and_scheduler(self):\n # assert self.network is not None, \"self.initialize_network must be called first\"\n # self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,\n # amsgrad=True)\n # self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,\n # patience=self.lr_scheduler_patience,\n # verbose=True, threshold=self.lr_scheduler_eps,\n # threshold_mode=\"abs\")\n def initialize_optimizer_and_scheduler(self):\n assert self.network is not None, \"self.initialize_network must be called first\"\n self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, 
weight_decay=self.weight_decay,\n momentum=0.99, nesterov=True)\n self.lr_scheduler = None\n\n def save_debug_information(self):\n # saving some debug information\n dct = OrderedDict()\n for k in self.__dir__():\n if not k.startswith(\"__\"):\n if not callable(getattr(self, k)):\n dct[k] = str(getattr(self, k))\n del dct['plans']\n del dct['intensity_properties']\n del dct['dataset']\n del dct['dataset_tr']\n del dct['dataset_val']\n save_json(dct, join(self.output_folder, \"debug.json\"))\n\n import shutil\n\n shutil.copy(self.plans_file, join(\n self.output_folder_base, \"plans.pkl\"))\n\n def run_training(self):\n self.save_debug_information()\n super(nnUNetTrainer, self).run_training()\n\n def load_plans_file(self):\n \"\"\"\n This is what actually configures the entire experiment. The plans file is generated by experiment planning\n :return:\n \"\"\"\n self.plans = load_pickle(self.plans_file)\n\n\n def process_plans(self, plans):\n if self.stage is None:\n assert len(list(plans['plans_per_stage'].keys())) == 1, \\\n \"If self.stage is None then there can be only one stage in the plans file. That seems to not be the \" \\\n \"case. Please specify which stage of the cascade must be trained\"\n self.stage = list(plans['plans_per_stage'].keys())[0]\n self.plans = plans\n\n stage_plans = self.plans['plans_per_stage'][self.stage]\n self.batch_size = stage_plans['batch_size']\n # self.batch_size = 4\n self.net_pool_per_axis = stage_plans['num_pool_per_axis']\n self.patch_size = np.array(stage_plans['patch_size']).astype(int)\n self.do_dummy_2D_aug = stage_plans['do_dummy_2D_data_aug']\n\n if 'pool_op_kernel_sizes' not in stage_plans.keys():\n assert 'num_pool_per_axis' in stage_plans.keys()\n self.print_to_log_file(\n \"WARNING! old plans file with missing pool_op_kernel_sizes. Attempting to fix it...\")\n self.net_num_pool_op_kernel_sizes = []\n for i in range(max(self.net_pool_per_axis)):\n curr = []\n for j in self.net_pool_per_axis:\n if (max(self.net_pool_per_axis) - j) <= i:\n curr.append(2)\n else:\n curr.append(1)\n self.net_num_pool_op_kernel_sizes.append(curr)\n else:\n self.net_num_pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']\n\n if 'conv_kernel_sizes' not in stage_plans.keys():\n self.print_to_log_file(\n \"WARNING! old plans file with missing conv_kernel_sizes. Attempting to fix it...\")\n self.net_conv_kernel_sizes = [\n [3] * len(self.net_pool_per_axis)] * (max(self.net_pool_per_axis) + 1)\n else:\n self.net_conv_kernel_sizes = stage_plans['conv_kernel_sizes']\n\n self.pad_all_sides = None # self.patch_size\n self.intensity_properties = plans['dataset_properties']['intensityproperties']\n self.normalization_schemes = plans['normalization_schemes']\n self.base_num_features = plans['base_num_features']\n self.num_input_channels = plans['num_modalities']\n # background is no longer in num_classes\n self.num_classes = plans['num_classes'] + 1\n self.classes = plans['all_classes']\n self.use_mask_for_norm = plans['use_mask_for_norm']\n self.only_keep_largest_connected_component = plans['keep_only_largest_region']\n self.min_region_size_per_class = plans['min_region_size_per_class']\n # DONT USE THIS. plans['min_size_per_class']\n self.min_size_per_class = None\n\n if plans.get('transpose_forward') is None or plans.get('transpose_backward') is None:\n print(\"WARNING! You seem to have data that was preprocessed with a previous version of nnU-Net. \"\n \"You should rerun preprocessing. 
We will proceed and assume that both transpose_foward \"\n \"and transpose_backward are [0, 1, 2]. If that is not correct then weird things will happen!\")\n plans['transpose_forward'] = [0, 1, 2]\n plans['transpose_backward'] = [0, 1, 2]\n self.transpose_forward = plans['transpose_forward']\n self.transpose_backward = plans['transpose_backward']\n\n if len(self.patch_size) == 2:\n self.threeD = False\n elif len(self.patch_size) == 3:\n self.threeD = True\n else:\n raise RuntimeError(\n \"invalid patch size in plans file: %s\" % str(self.patch_size))\n\n if \"conv_per_stage\" in plans.keys(): # this ha sbeen added to the plans only recently\n self.conv_per_stage = plans['conv_per_stage']\n else:\n self.conv_per_stage = 2\n\n def load_dataset(self):\n self.dataset = load_dataset(self.folder_with_preprocessed_data)\n\n def get_basic_generators(self):\n self.load_dataset()\n self.do_split()\n\n if self.threeD:\n # dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,\n # False, oversample_foreground_percent=self.oversample_foreground_percent,\n # pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r', labeled_cases=10)\n # dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False,\n # oversample_foreground_percent=self.oversample_foreground_percent,\n # pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r', labeled_cases=10)\n dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,\n False, oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False,\n oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n else:\n dl_tr = DataLoader2D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,\n oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size,\n oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n return dl_tr, dl_val\n\n\n def preprocess_patient(self, input_files):\n \"\"\"\n Used to predict new unseen data. 
Not used for the preprocessing of the training/test data\n :param input_files:\n :return:\n \"\"\"\n preprocessor_name = self.plans.get('preprocessor_name')\n if preprocessor_name is None:\n if self.threeD:\n preprocessor_name = \"GenericPreprocessor\"\n preprocessor_class = GenericPreprocessor\n else:\n preprocessor_name = \"PreprocessorFor2D\"\n preprocessor_class = PreprocessorFor2D\n if preprocessor_name == \"GenericPreprocessor\":\n preprocessor_class = GenericPreprocessor\n else:\n preprocessor_class = PreprocessorFor2D\n assert preprocessor_class is not None, \"Could not find preprocessor %s in nnunet.preprocessing\" % \\\n preprocessor_name\n preprocessor = preprocessor_class(self.normalization_schemes, self.use_mask_for_norm,\n self.transpose_forward, self.intensity_properties)\n\n d, s, properties = preprocessor.preprocess_test_case(input_files,\n self.plans['plans_per_stage'][self.stage][\n 'current_spacing'])\n return d, s, properties\n\n def preprocess_predict_nifti(self, input_files: List[str], output_file: str = None,\n softmax_ouput_file: str = None, mixed_precision: bool = True) -> None:\n \"\"\"\n Use this to predict new data\n :param input_files:\n :param output_file:\n :param softmax_ouput_file:\n :param mixed_precision:\n :return:\n \"\"\"\n print(\"preprocessing...\")\n d, s, properties = self.preprocess_patient(input_files)\n print(\"predicting...\")\n pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params[\"do_mirror\"],\n mirror_axes=self.data_aug_params['mirror_axes'],\n use_sliding_window=True, step_size=0.5,\n use_gaussian=True, pad_border_mode='constant',\n pad_kwargs={\n 'constant_values': 0},\n verbose=True, all_in_gpu=False,\n mixed_precision=mixed_precision)[1]\n pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward])\n\n if 'segmentation_export_params' in self.plans.keys():\n force_separate_z = self.plans['segmentation_export_params']['force_separate_z']\n interpolation_order = self.plans['segmentation_export_params']['interpolation_order']\n interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']\n else:\n force_separate_z = None\n interpolation_order = 1\n interpolation_order_z = 0\n\n print(\"resampling to original spacing and nifti export...\")\n save_segmentation_nifti_from_softmax(pred, output_file, properties, interpolation_order,\n self.regions_class_order, None, None, softmax_ouput_file,\n None, force_separate_z=force_separate_z,\n interpolation_order_z=interpolation_order_z)\n print(\"done\")\n\n def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,\n mirror_axes: Tuple[int] = None,\n use_sliding_window: bool = True, step_size: float = 0.5,\n use_gaussian: bool = True, pad_border_mode: str = 'constant',\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision: bool = True) -> Tuple[\n np.ndarray, np.ndarray]:\n \"\"\"\n :param data:\n :param do_mirroring:\n :param mirror_axes:\n :param use_sliding_window:\n :param step_size:\n :param use_gaussian:\n :param pad_border_mode:\n :param pad_kwargs:\n :param all_in_gpu:\n :param verbose:\n :return:\n \"\"\"\n if pad_border_mode == 'constant' and pad_kwargs is None:\n pad_kwargs = {'constant_values': 0}\n\n if do_mirroring and mirror_axes is None:\n mirror_axes = self.data_aug_params['mirror_axes']\n\n if do_mirroring:\n assert self.data_aug_params[\"do_mirror\"], \"Cannot do mirroring as test time augmentation when training \" 
\\\n \"was done without mirroring\"\n\n # valid = list((SegmentationNetwork, nn.DataParallel))\n # print(self.network)\n # assert isinstance(self.network, tuple(valid))\n\n current_mode = self.network.training\n self.network.eval()\n ret = SegmentationNetwork.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes,\n use_sliding_window=use_sliding_window, step_size=step_size,\n patch_size=self.patch_size, regions_class_order=self.regions_class_order,\n use_gaussian=use_gaussian, pad_border_mode=pad_border_mode,\n pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose,\n mixed_precision=mixed_precision)\n self.network.train(current_mode)\n return ret\n\n def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,\n save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,\n validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,\n segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):\n \"\"\"\n if debug=True then the temporary files generated for postprocessing determination will be kept\n \"\"\"\n\n current_mode = self.network.training\n self.network.eval()\n\n assert self.was_initialized, \"must initialize, ideally with checkpoint (or train first)\"\n if self.dataset_val is None:\n self.load_dataset()\n self.do_split()\n\n if segmentation_export_kwargs is None:\n if 'segmentation_export_params' in self.plans.keys():\n force_separate_z = self.plans['segmentation_export_params']['force_separate_z']\n interpolation_order = self.plans['segmentation_export_params']['interpolation_order']\n interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']\n else:\n force_separate_z = None\n interpolation_order = 1\n interpolation_order_z = 0\n else:\n force_separate_z = segmentation_export_kwargs['force_separate_z']\n interpolation_order = segmentation_export_kwargs['interpolation_order']\n interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']\n\n # predictions as they come from the network go here\n output_folder = join(self.output_folder, validation_folder_name)\n maybe_mkdir_p(output_folder)\n # this is for debug purposes\n my_input_args = {'do_mirroring': do_mirroring,\n 'use_sliding_window': use_sliding_window,\n 'step_size': step_size,\n 'save_softmax': save_softmax,\n 'use_gaussian': use_gaussian,\n 'overwrite': overwrite,\n 'validation_folder_name': validation_folder_name,\n 'debug': debug,\n 'all_in_gpu': all_in_gpu,\n 'segmentation_export_kwargs': segmentation_export_kwargs,\n }\n save_json(my_input_args, join(output_folder, \"validation_args.json\"))\n\n if do_mirroring:\n if not self.data_aug_params['do_mirror']:\n raise RuntimeError(\n \"We did not train with mirroring so you cannot do inference with mirroring enabled\")\n mirror_axes = self.data_aug_params['mirror_axes']\n else:\n mirror_axes = ()\n\n pred_gt_tuples = []\n\n export_pool = Pool(default_num_threads)\n results = []\n\n for k in self.dataset_val.keys():\n properties = load_pickle(self.dataset[k]['properties_file'])\n fname = properties['list_of_data_files'][0].split(\"/\")[-1][:-12]\n if overwrite or (not isfile(join(output_folder, fname + \".nii.gz\"))) or \\\n (save_softmax and not isfile(join(output_folder, fname + \".npz\"))):\n data = np.load(self.dataset[k]['data_file'])['data']\n\n print(k, data.shape)\n data[-1][data[-1] == -1] = 0\n\n softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1],\n 
do_mirroring=do_mirroring,\n mirror_axes=mirror_axes,\n use_sliding_window=use_sliding_window,\n step_size=step_size,\n use_gaussian=use_gaussian,\n all_in_gpu=all_in_gpu,\n mixed_precision=self.fp16)[1]\n\n softmax_pred = softmax_pred.transpose(\n [0] + [i + 1 for i in self.transpose_backward])\n\n if save_softmax:\n softmax_fname = join(output_folder, fname + \".npz\")\n else:\n softmax_fname = None\n\n \"\"\"There is a problem with python process communication that prevents us from communicating obejcts\n larger than 2 GB between processes (basically when the length of the pickle string that will be sent is\n communicated by the multiprocessing.Pipe object then the placeholder (\\%i I think) does not allow for long\n enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually\n patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will\n then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either\n filename or np.ndarray and will handle this automatically\"\"\"\n if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save\n np.save(join(output_folder, fname + \".npy\"), softmax_pred)\n softmax_pred = join(output_folder, fname + \".npy\")\n\n results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,\n ((softmax_pred, join(output_folder, fname + \".nii.gz\"),\n properties, interpolation_order, self.regions_class_order,\n None, None,\n softmax_fname, None, force_separate_z,\n interpolation_order_z),\n )\n )\n )\n\n pred_gt_tuples.append([join(output_folder, fname + \".nii.gz\"),\n join(self.gt_niftis_folder, fname + \".nii.gz\")])\n\n _ = [i.get() for i in results]\n self.print_to_log_file(\"finished prediction\")\n\n # evaluate raw predictions\n self.print_to_log_file(\"evaluation of raw predictions\")\n task = self.dataset_directory.split(\"/\")[-1]\n job_name = self.experiment_name\n _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)),\n json_output_file=join(\n output_folder, \"summary.json\"),\n json_name=job_name +\n \" val tiled %s\" % (str(use_sliding_window)),\n json_author=\"Fabian\",\n json_task=task, num_threads=default_num_threads)\n\n # if run_postprocessing_on_folds:\n # # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything\n # # except the largest connected component for each class. To see if this improves results, we do this for all\n # # classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will\n # # have this applied during inference as well\n # self.print_to_log_file(\"determining postprocessing\")\n # determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name,\n # final_subf_name=validation_folder_name + \"_postprocessed\", debug=debug)\n # # after this the final predictions for the vlaidation set can be found in validation_folder_name_base + \"_postprocessed\"\n # # They are always in that folder, even if no postprocessing as applied!\n\n # detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another\n # postprocesing to be better? In this case we need to consolidate. 
At the time the consolidation is going to be\n # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to\n # be used later\n gt_nifti_folder = join(self.output_folder_base, \"gt_niftis\")\n maybe_mkdir_p(gt_nifti_folder)\n for f in subfiles(self.gt_niftis_folder, suffix=\".nii.gz\"):\n success = False\n attempts = 0\n e = None\n while not success and attempts < 10:\n try:\n shutil.copy(f, gt_nifti_folder)\n success = True\n except OSError as e:\n attempts += 1\n sleep(1)\n if not success:\n print(\"Could not copy gt nifti file %s into folder %s\" %\n (f, gt_nifti_folder))\n if e is not None:\n raise e\n\n self.network.train(current_mode)\n\n def run_online_evaluation(self, output, target):\n with torch.no_grad():\n num_classes = output.shape[1]\n output_softmax = softmax_helper(output)\n output_seg = output_softmax.argmax(1)\n target = target[:, 0]\n axes = tuple(range(1, len(target.shape)))\n tp_hard = torch.zeros(\n (target.shape[0], num_classes - 1)).to(output_seg.device.index)\n fp_hard = torch.zeros(\n (target.shape[0], num_classes - 1)).to(output_seg.device.index)\n fn_hard = torch.zeros(\n (target.shape[0], num_classes - 1)).to(output_seg.device.index)\n for c in range(1, num_classes):\n tp_hard[:, c - 1] = sum_tensor((output_seg == c).float()\n * (target == c).float(), axes=axes)\n fp_hard[:, c - 1] = sum_tensor((output_seg == c).float()\n * (target != c).float(), axes=axes)\n fn_hard[:, c - 1] = sum_tensor((output_seg != c).float()\n * (target == c).float(), axes=axes)\n\n tp_hard = tp_hard.sum(0, keepdim=False).detach().cpu().numpy()\n fp_hard = fp_hard.sum(0, keepdim=False).detach().cpu().numpy()\n fn_hard = fn_hard.sum(0, keepdim=False).detach().cpu().numpy()\n\n self.online_eval_foreground_dc.append(\n list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))\n self.online_eval_tp.append(list(tp_hard))\n self.online_eval_fp.append(list(fp_hard))\n self.online_eval_fn.append(list(fn_hard))\n\n def finish_online_evaluation(self):\n self.online_eval_tp = np.sum(self.online_eval_tp, 0)\n self.online_eval_fp = np.sum(self.online_eval_fp, 0)\n self.online_eval_fn = np.sum(self.online_eval_fn, 0)\n\n global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n self.all_val_eval_metrics.append(np.mean(global_dc_per_class))\n\n self.print_to_log_file(\"Average global foreground Dice:\", [\n np.round(i, 4) for i in global_dc_per_class])\n self.print_to_log_file(\"(interpret this as an estimate for the Dice of the different classes. 
This is not \"\n \"exact.)\")\n\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []\n\n def save_checkpoint(self, fname, save_optimizer=True):\n super(nnUNetTrainer, self).save_checkpoint(fname, save_optimizer)\n info = OrderedDict()\n info['init'] = self.init_args\n info['name'] = self.__class__.__name__\n info['class'] = str(self.__class__)\n info['plans'] = self.plans\n\n write_pickle(info, fname + \".pkl\")" }, { "identifier": "Generic_UNet", "path": "nn_transunet/networks/nnunet_model.py", "snippet": "class Generic_UNet(SegmentationNetwork):\n DEFAULT_BATCH_SIZE_3D = 2\n DEFAULT_PATCH_SIZE_3D = (64, 192, 160)\n SPACING_FACTOR_BETWEEN_STAGES = 2\n BASE_NUM_FEATURES_3D = 30\n MAX_NUMPOOL_3D = 999\n MAX_NUM_FILTERS_3D = 320\n\n DEFAULT_PATCH_SIZE_2D = (256, 256)\n BASE_NUM_FEATURES_2D = 30\n DEFAULT_BATCH_SIZE_2D = 50\n MAX_NUMPOOL_2D = 999\n MAX_FILTERS_2D = 480\n\n use_this_for_batch_size_computation_2D = 19739648\n use_this_for_batch_size_computation_3D = 520000000 # 505789440\n\n def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,\n feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,\n norm_op=nn.BatchNorm2d, norm_op_kwargs=None,\n dropout_op=nn.Dropout2d, dropout_op_kwargs=None,\n nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,\n final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,\n conv_kernel_sizes=None,\n upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,\n max_num_features=None, basic_block=ConvDropoutNormNonlin,\n seg_output_use_bias=False):\n \"\"\"\n basically more flexible than v1, architecture is the same\n\n Does this look complicated? Nah bro. Functionality > usability\n\n This does everything you need, including world peace.\n\n Questions? 
-> [email protected]\n \"\"\"\n super(Generic_UNet, self).__init__()\n self.convolutional_upsampling = convolutional_upsampling\n self.convolutional_pooling = convolutional_pooling\n self.upscale_logits = upscale_logits\n if nonlin_kwargs is None:\n nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n if dropout_op_kwargs is None:\n dropout_op_kwargs = {'p': 0.5, 'inplace': True}\n if norm_op_kwargs is None:\n norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}\n\n self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}\n\n self.nonlin = nonlin\n self.nonlin_kwargs = nonlin_kwargs\n self.dropout_op_kwargs = dropout_op_kwargs\n self.norm_op_kwargs = norm_op_kwargs\n self.weightInitializer = weightInitializer\n self.conv_op = conv_op\n self.norm_op = norm_op\n self.dropout_op = dropout_op\n self.num_classes = num_classes\n self.final_nonlin = final_nonlin\n self._deep_supervision = deep_supervision\n self.do_ds = deep_supervision\n\n if conv_op == nn.Conv2d:\n upsample_mode = 'bilinear'\n pool_op = nn.MaxPool2d\n transpconv = nn.ConvTranspose2d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3)] * (num_pool + 1)\n elif conv_op == nn.Conv3d:\n upsample_mode = 'trilinear'\n pool_op = nn.MaxPool3d\n transpconv = nn.ConvTranspose3d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)\n else:\n raise ValueError(\"unknown convolution dimensionality, conv op: %s\" % str(conv_op))\n\n self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)\n self.pool_op_kernel_sizes = pool_op_kernel_sizes\n self.conv_kernel_sizes = conv_kernel_sizes\n\n self.conv_pad_sizes = []\n for krnl in self.conv_kernel_sizes:\n self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])\n\n if max_num_features is None:\n if self.conv_op == nn.Conv3d:\n self.max_num_features = self.MAX_NUM_FILTERS_3D\n else:\n self.max_num_features = self.MAX_FILTERS_2D\n else:\n self.max_num_features = max_num_features\n\n self.conv_blocks_context = []\n self.conv_blocks_localization = []\n self.td = []\n self.tu = []\n self.seg_outputs = []\n\n output_features = base_num_features\n input_features = input_channels\n\n for d in range(num_pool):\n # determine the first stride\n if d != 0 and self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[d - 1]\n else:\n first_stride = None\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[d]\n # add convolutions\n self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,\n self.conv_op, self.conv_kwargs, self.norm_op,\n self.norm_op_kwargs, self.dropout_op,\n self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,\n first_stride, basic_block=basic_block))\n if not self.convolutional_pooling:\n self.td.append(pool_op(pool_op_kernel_sizes[d]))\n input_features = output_features\n output_features = int(np.round(output_features * feat_map_mul_on_downscale))\n\n output_features = min(output_features, self.max_num_features)\n\n # now the bottleneck.\n # determine the first stride\n if self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[-1]\n else:\n first_stride = None\n\n # the output of the last conv must match the number of features from the skip connection if we are not using\n # convolutional upsampling. 
If we use convolutional upsampling then the reduction in feature maps will be\n # done by the transposed conv\n if self.convolutional_upsampling:\n final_num_features = output_features\n else:\n final_num_features = self.conv_blocks_context[-1].output_channels\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]\n self.conv_blocks_context.append(nn.Sequential(\n StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, first_stride, basic_block=basic_block),\n StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, basic_block=basic_block)))\n\n # if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here\n if not dropout_in_localization:\n old_dropout_p = self.dropout_op_kwargs['p']\n self.dropout_op_kwargs['p'] = 0.0\n\n # now lets build the localization pathway\n for u in range(num_pool):\n nfeatures_from_down = final_num_features\n nfeatures_from_skip = self.conv_blocks_context[\n -(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2\n n_features_after_tu_and_concat = nfeatures_from_skip * 2\n\n # the first conv reduces the number of features to match those of skip\n # the following convs work on that number of features\n # if not convolutional upsampling then the final conv reduces the num of features again\n if u != num_pool - 1 and not self.convolutional_upsampling:\n final_num_features = self.conv_blocks_context[-(3 + u)].output_channels\n else:\n final_num_features = nfeatures_from_skip\n\n if not self.convolutional_upsampling:\n self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))\n else:\n self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],\n pool_op_kernel_sizes[-(u + 1)], bias=False))\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]\n self.conv_blocks_localization.append(nn.Sequential(\n StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,\n self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,\n self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),\n StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,\n self.nonlin, self.nonlin_kwargs, basic_block=basic_block)\n ))\n\n for ds in range(len(self.conv_blocks_localization)):\n self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,\n 1, 1, 0, 1, 1, seg_output_use_bias))\n\n self.upscale_logits_ops = []\n cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]\n for usl in range(num_pool - 1):\n if self.upscale_logits:\n self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),\n mode=upsample_mode))\n else:\n self.upscale_logits_ops.append(lambda x: x)\n\n if not dropout_in_localization:\n self.dropout_op_kwargs['p'] = old_dropout_p\n\n # register all modules properly\n 
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)\n self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)\n self.td = nn.ModuleList(self.td)\n self.tu = nn.ModuleList(self.tu)\n self.seg_outputs = nn.ModuleList(self.seg_outputs)\n if self.upscale_logits:\n self.upscale_logits_ops = nn.ModuleList(\n self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here\n\n if self.weightInitializer is not None:\n self.apply(self.weightInitializer)\n # self.apply(print_module_training_status)\n\n def forward(self, x):\n skips = []\n seg_outputs = []\n for d in range(len(self.conv_blocks_context) - 1):\n x = self.conv_blocks_context[d](x)\n skips.append(x)\n if not self.convolutional_pooling:\n x = self.td[d](x) # downsample\n\n x = self.conv_blocks_context[-1](x)\n\n\n for u in range(len(self.tu)):\n x = self.tu[u](x) # upsample\n x = torch.cat((x, skips[-(u + 1)]), dim=1)\n x = self.conv_blocks_localization[u](x)\n seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))\n\n if self._deep_supervision and self.do_ds:\n return tuple([seg_outputs[-1]] + [i(j) for i, j in\n zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])\n else:\n return seg_outputs[-1]\n\n @staticmethod\n def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,\n num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,\n conv_per_stage=2):\n \"\"\"\n This only applies for num_conv_per_stage and convolutional_upsampling=True\n not real vram consumption. just a constant term to which the vram consumption will be approx proportional\n (+ offset for parameter storage)\n :param deep_supervision:\n :param patch_size:\n :param num_pool_per_axis:\n :param base_num_features:\n :param max_num_features:\n :param num_modalities:\n :param num_classes:\n :param pool_op_kernel_sizes:\n :return:\n \"\"\"\n if not isinstance(num_pool_per_axis, np.ndarray):\n num_pool_per_axis = np.array(num_pool_per_axis)\n\n npool = len(pool_op_kernel_sizes)\n\n map_size = np.array(patch_size)\n tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +\n num_modalities * np.prod(map_size, dtype=np.int64) +\n num_classes * np.prod(map_size, dtype=np.int64))\n\n num_feat = base_num_features\n\n for p in range(npool):\n for pi in range(len(num_pool_per_axis)):\n map_size[pi] /= pool_op_kernel_sizes[p][pi]\n num_feat = min(num_feat * 2, max_num_features)\n num_blocks = (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv\n tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat\n if deep_supervision and p < (npool - 2):\n tmp += np.prod(map_size, dtype=np.int64) * num_classes\n # print(p, map_size, num_feat, tmp)\n return tmp" }, { "identifier": "default_2D_augmentation_params", "path": "nn_transunet/data/default_data_augmentation.py", "snippet": "def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):\ndef get_default_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,\n border_val_seg=-1, pin_memory=True,\n seeds_train=None, seeds_val=None, regions=None):" }, { "identifier": "unpack_dataset", "path": "nn_transunet/data/dataset_loading.py", "snippet": "def unpack_dataset(folder, threads=default_num_threads, key=\"data\"):\n \"\"\"\n unpacks all npz files in a folder to npy (whatever you want to have unpacked must 
be saved unter key)\n :param folder:\n :param threads:\n :param key:\n :return:\n \"\"\"\n p = Pool(threads)\n npz_files = subfiles(folder, True, None, \".npz\", True)\n p.map(convert_to_npy, zip(npz_files, [key] * len(npz_files)))\n p.close()\n p.join()" } ]
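The run_online_evaluation snippet in the context above derives a per-class foreground Dice from hard true/false positives and false negatives. The standalone sketch below reproduces only that formula on small made-up tensors (the tensor values and class count are illustrative, not taken from this record), assuming PyTorch is available.

import torch

num_classes = 3                                   # background + 2 foreground classes (illustrative)
output_seg = torch.tensor([[0, 1, 2, 1],
                           [2, 2, 0, 1]])         # argmax of the network softmax
target = torch.tensor([[0, 1, 1, 1],
                       [2, 0, 0, 1]])             # ground-truth labels

axes = tuple(range(1, len(target.shape)))         # sum over everything except the batch axis
dice_per_class = []
for c in range(1, num_classes):                   # foreground classes only, as in the snippet
    tp = ((output_seg == c) & (target == c)).float().sum(dim=axes).sum()
    fp = ((output_seg == c) & (target != c)).float().sum(dim=axes).sum()
    fn = ((output_seg != c) & (target == c)).float().sum(dim=axes).sum()
    dice_per_class.append((2 * tp / (2 * tp + fp + fn + 1e-8)).item())

print(dice_per_class)                             # per-class foreground Dice estimate

In the trainer itself these counts are accumulated across batches and finish_online_evaluation averages 2*tp / (2*tp + fp + fn) over the foreground classes as an estimate of the validation Dice.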
from collections import OrderedDict from typing import Tuple from ..data.data_augmentation_moreDA import get_moreDA_augmentation from ..trainer.loss_functions import MultipleOutputLoss2 from ..trainer.network_trainer import maybe_to_torch, to_cuda from ..trainer.nnUNetTrainer import nnUNetTrainer from ..networks.nnunet_model import Generic_UNet from ..data.default_data_augmentation import default_2D_augmentation_params, \ get_patch_size, default_3D_augmentation_params from ..data.dataset_loading import unpack_dataset from sklearn.model_selection import KFold from torch.cuda.amp import autocast from batchgenerators.utilities.file_and_folder_operations import * from torch import nn from loss_functions import DC_and_CE_loss from ..networks.transunet3d_model import Generic_TransUNet_max_ppbp import numpy as np import torch import torch.nn.functional as F
16,795
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. softmax_helper = lambda x: F.softmax(x, 1) def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs)**exponent class InitWeights_He(object): def __init__(self, neg_slope=1e-2): self.neg_slope = neg_slope def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class InitWeights_XavierUniform(object): def __init__(self, gain=1): self.gain = gain def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.xavier_uniform_(module.weight, self.gain) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class nnUNetTrainerV2(nnUNetTrainer): """ Info for Fabian: same as internal nnUNetTrainerV2_2 """ def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False, input_size=(64, 160, 160),args=None): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) if args is not None: self.input_size=input_size self.model = args.model self.resume = args.resume self.disable_ds=args.disable_ds self.max_num_epochs = args.max_num_epochs # set 1 gpu training self.initial_lr = args.initial_lr # 0.01 self.args = args if self.disable_ds: print("disable_ds") # print("not runnable for this feature! current nnunetV2 (w/o DDP) only support deep supervision version") # raise NotImplementedError else: print("runnning DDP, inheriting nnUNetTrainerV2") self.save_every = 1 # prev 50 # self.max_num_epochs = 1000 # self.initial_lr = 1e-2 self.deep_supervision_scales = None self.ds_loss_weights = None self.pin_memory = True def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - enforce to only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights if self.disable_ds: self.ds_loss_weights[0]=1 self.ds_loss_weights[1:]=0 self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) else: # now wrap the loss
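The initialize method in the cropped code above builds the deep-supervision loss weights by halving the weight at each lower resolution, masking out the lowest-resolution output and renormalizing. A quick numeric walk-through, with an assumed net_numpool of 5, shows the resulting ds_loss_weights.

import numpy as np

net_numpool = 5                                                    # assumed number of decoder outputs
weights = np.array([1 / (2 ** i) for i in range(net_numpool)])     # [1, 0.5, 0.25, 0.125, 0.0625]
mask = np.array([True] + [i < net_numpool - 1 for i in range(1, net_numpool)])
weights[~mask] = 0                                                 # lowest-resolution output gets weight 0
weights = weights / weights.sum()                                  # normalize so the weights sum to 1
print(weights)                                                     # ~[0.533, 0.267, 0.133, 0.067, 0.0]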
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. softmax_helper = lambda x: F.softmax(x, 1) def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs)**exponent class InitWeights_He(object): def __init__(self, neg_slope=1e-2): self.neg_slope = neg_slope def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class InitWeights_XavierUniform(object): def __init__(self, gain=1): self.gain = gain def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.xavier_uniform_(module.weight, self.gain) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class nnUNetTrainerV2(nnUNetTrainer): """ Info for Fabian: same as internal nnUNetTrainerV2_2 """ def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False, input_size=(64, 160, 160),args=None): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) if args is not None: self.input_size=input_size self.model = args.model self.resume = args.resume self.disable_ds=args.disable_ds self.max_num_epochs = args.max_num_epochs # set 1 gpu training self.initial_lr = args.initial_lr # 0.01 self.args = args if self.disable_ds: print("disable_ds") # print("not runnable for this feature! 
current nnunetV2 (w/o DDP) only support deep supervision version") # raise NotImplementedError else: print("runnning DDP, inheriting nnUNetTrainerV2") self.save_every = 1 # prev 50 # self.max_num_epochs = 1000 # self.initial_lr = 1e-2 self.deep_supervision_scales = None self.ds_loss_weights = None self.pin_memory = True def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - enforce to only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights if self.disable_ds: self.ds_loss_weights[0]=1 self.ds_loss_weights[1:]=0 self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) else: # now wrap the loss
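poly_lr, defined near the top of the file above, decays the learning rate as initial_lr * (1 - epoch / max_epochs) ** 0.9. The short sketch below evaluates it for an assumed initial_lr of 0.01 over 1000 epochs; the specific numbers are only illustrative.

def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9):
    return initial_lr * (1 - epoch / max_epochs) ** exponent

# assumed settings: initial_lr = 0.01, max_epochs = 1000
for epoch in (0, 250, 500, 999):
    print(epoch, round(poly_lr(epoch, 1000, 0.01), 6))
# 0 -> 0.01, 250 -> ~0.0077, 500 -> ~0.0054, 999 -> ~0.00002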
self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
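The gold next line wraps the base loss and ds_loss_weights in MultipleOutputLoss2, whose implementation is not included in this record. The sketch below is only a guess at the usual shape of such a deep-supervision wrapper (a weighted sum of the base loss over matching output/target pairs); the class name is hypothetical and the real MultipleOutputLoss2 may differ.

import torch.nn as nn

class WeightedMultiOutputLoss(nn.Module):             # hypothetical name, not the nnU-Net class
    def __init__(self, base_loss, weights):
        super().__init__()
        self.base_loss = base_loss                    # e.g. a Dice + cross-entropy loss
        self.weights = weights                        # e.g. the ds_loss_weights computed above

    def forward(self, outputs, targets):
        # outputs/targets: one entry per decoder resolution, highest resolution first
        total = self.weights[0] * self.base_loss(outputs[0], targets[0])
        for w, x, y in zip(self.weights[1:], outputs[1:], targets[1:]):
            if w != 0:
                total = total + w * self.base_loss(x, y)
        return total

With weights like those computed in the example above, outputs whose weight is zero contribute nothing to the gradient.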
1
2023-10-11 05:19:25+00:00
24k
eai-lab/On-NAS
cifar_search.py
[ { "identifier": "genotypes", "path": "utils/genotypes.py", "snippet": "PRIMITIVES = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n \"none\",\n]\nPRIMITIVES_FEWSHOT = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"conv_1x5_5x1\",\n \"conv_3x3\",\n \"sep_conv_3x3\",\n # \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n # \"dil_conv_5x5\",\n # \"none\",\n]\ndef to_dag(C_in, gene, reduction):\ndef from_str(s):\ndef parse(alpha, k, primitives=PRIMITIVES_FEWSHOT):\ndef parse_pairwise(alpha, alpha_pairwise, primitives=PRIMITIVES_FEWSHOT): # deprecated" }, { "identifier": "SearchCNNController", "path": "models/search_cnn.py", "snippet": "class SearchCNNController(nn.Module):\n \"\"\" SearchCNN controller supporting multi-gpu \"\"\"\n def __init__(\n self,\n \n C_in,\n C,\n n_classes,\n n_layers,\n config,\n n_nodes=4,\n reduction_layers=[],\n stem_multiplier=3,\n device_ids=None,\n normalizer=dict(),\n PRIMITIVES=None,\n feature_scale_rate=2,\n use_hierarchical_alphas=False, # deprecated\n use_pairwise_input_alphas=False,\n alpha_prune_threshold=0.0,\n ):\n super().__init__()\n self.n_nodes = n_nodes\n self.criterion = nn.CrossEntropyLoss()\n self.use_pairwise_input_alphas = use_pairwise_input_alphas\n self.use_hierarchical_alphas = use_hierarchical_alphas\n self.alpha_prune_threshold = alpha_prune_threshold\n \n if \"name\" not in normalizer.keys():\n normalizer[\"func\"] = SoftMax\n normalizer[\"params\"] = dict()\n normalizer[\"params\"][\"temp_anneal_mode\"] = None\n elif normalizer[\"name\"] == \"softmax\":\n normalizer[\"func\"] = SoftMax\n elif normalizer[\"name\"] == \"relusoftmax\":\n normalizer[\"func\"] = ReLUSoftMax\n elif normalizer[\"name\"] == \"gumbel_softmax\":\n normalizer[\"func\"] = GumbelSoftMax\n else:\n raise RuntimeError(f\"Unknown normalizer {normalizer['name']}\")\n self.normalizer = normalizer\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n self.device_ids = device_ids\n\n \n \n # initialize architect parameters: alphas\n if PRIMITIVES is None:\n PRIMITIVES = gt.PRIMITIVES\n\n self.primitives = PRIMITIVES\n n_ops = len(PRIMITIVES)\n\n self.alpha_normal = nn.ParameterList()\n self.alpha_reduce = nn.ParameterList()\n\n \n for i in range(n_nodes):\n # create alpha parameters over parallel operations\n self.alpha_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n self.alpha_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n \n \n\n \n assert not (\n use_hierarchical_alphas and use_pairwise_input_alphas\n ), \"Hierarchical and pairwise alphas exclude each other.\"\n\n self.alpha_pw_normal = None\n self.alpha_pw_reduce = None\n self.alpha_in_normal = None\n self.alpha_in_reduce = None\n if use_hierarchical_alphas: # deprecated\n # create alpha parameters the different input nodes for a cell, i.e. 
for each node in a\n # cell an additional distribution over the input nodes is introduced\n print(\"Using hierarchical alphas.\")\n\n self.alpha_in_normal = nn.ParameterList()\n self.alpha_in_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n self.alpha_in_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n self.alpha_in_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n\n elif use_pairwise_input_alphas:\n print(\"Using pairwise input alphas.\")\n\n self.alpha_pw_normal = nn.ParameterList()\n self.alpha_pw_reduce = nn.ParameterList()\n\n \n for i in range(n_nodes):\n num_comb = int(scipy.special.binom(i + 2, 2))\n self.alpha_pw_normal.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n self.alpha_pw_reduce.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n \n \n\n # setup alphas list\n self._alphas = []\n \n for n, p in self.named_parameters():\n if \"alpha\" in n:\n self._alphas.append((n, p))\n\n \n \n self.net = SearchCNN(\n \n C_in,\n C,\n n_classes,\n n_layers,\n config,\n n_nodes,\n reduction_layers,\n stem_multiplier,\n PRIMITIVES=self.primitives,\n feature_scale_rate=feature_scale_rate,\n )\n\n \n\n def apply_normalizer(self, alpha):\n return self.normalizer[\"func\"](alpha, self.normalizer[\"params\"])\n\n def _get_normalized_alphas(self):\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = None\n weights_pw_reduce = None\n weights_in_normal = None\n weights_in_reduce = None\n if self.alpha_in_normal is not None:\n weights_in_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_normal\n ]\n weights_in_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_reduce\n ]\n elif self.alpha_pw_normal is not None:\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n\n return (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n )\n\n def prune_alphas(self, prune_threshold=0.0, val=-10e8):\n \"\"\"Set the alphas with probability below prune_threshold to a large negative value\n\n Note:\n The prune_threshold applies to the alpha probabilities (after the softmax is\n applied) while `val` corresponds to the logit values (thus a large negative value\n corresponds to a low probability).\n \"\"\"\n\n # reset temperature for prunning\n model_has_normalizer = hasattr(self, \"normalizer\")\n if model_has_normalizer:\n curr_step_backup = self.normalizer[\"params\"][\"curr_step\"]\n self.normalizer[\"params\"][\"curr_step\"] = (\n self.normalizer[\"params\"][\"max_steps\"] - 1\n )\n\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n for idx in range(len(weights_normal)):\n # need to modify data because alphas are leaf variables\n self.alpha_normal[idx].data[weights_normal[idx] < prune_threshold] = val\n self.alpha_reduce[idx].data[weights_reduce[idx] < prune_threshold] = val\n\n # set curr_step back to original value\n self.normalizer[\"params\"][\"curr_step\"] = curr_step_backup\n\n def get_sparse_alphas_pw(self, alpha_prune_threshold=0.0):\n\n \"\"\"\n Convert alphas to zero-one-vectors under consideration of pairwise alphas\n\n\n :param alpha_prune_threshold: threshold for pruning\n\n :return: binary 
tensors with shape like alpha_normal and alpha_reduce, indicating whether an op is included in the\n sparsified one shot model\n \"\"\"\n\n assert (\n self.alpha_pw_normal is not None\n ), \"Error: function only availaible for pw models\"\n\n weights_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_normal\n ] # get normalized weights\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n\n weights_normal_sparse = list()\n\n # get all the pairs of inputs\n for node_idx, node_weights in enumerate(weights_normal):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_normal[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_normal[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_normal_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n ### same for reduction\n\n weights_reduce_sparse = list()\n\n for node_idx, node_weights in enumerate(weights_reduce):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_reduce[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_reduce[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_reduce_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n return weights_normal_sparse, weights_reduce_sparse\n\n def get_sparse_num_params(self, alpha_prune_threshold=0.0):\n \"\"\"Get number of parameters for sparse one-shot-model\n\n Returns:\n A torch tensor\n \"\"\"\n\n weights_normal, weights_reduce = self.get_sparse_alphas_pw(\n alpha_prune_threshold\n )\n # this returns tensors with only 0's and 1's depending on whether an op is used in the sparsified model\n\n # get none active ops/layer names\n\n # for normal cell\n none_active_ops_normal = list()\n for node_idx, node in enumerate(weights_normal):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_normal.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n # and for reduction cell\n none_active_ops_reduce = list()\n for 
node_idx, node in enumerate(weights_reduce):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_reduce.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n all_params = sum(\n p.numel() for p in self.net.parameters()\n ) # params of one-shot model\n\n # get normal and reduction layers\n normal_cells = list()\n red_cells = list()\n for lyr, cell in enumerate(self.net.cells):\n if cell.reduction:\n red_cells.append(lyr)\n else:\n normal_cells.append(lyr)\n\n # count params of non-active ops\n\n none_active_params = 0\n for layer_name, layer_weights in self.named_parameters():\n # check if layer is part of normal or reduction cell\n if \"net.cells.\" in layer_name: # layer part of cells at all?\n for cell in normal_cells: # normal cell?\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_normal\n\n # else reduction cell\n for cell in red_cells:\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_reduce\n\n if any(\n [none_active_op in layer_name for none_active_op in none_active_ops]\n ): # check if layer is part of none-active ops\n none_active_params += layer_weights.numel()\n\n active_params = all_params - none_active_params\n\n return active_params\n\n def drop_path_prob(self, p):\n \"\"\" Set drop path probability \"\"\"\n for module in self.net.modules():\n if isinstance(module, ops.DropPath_):\n module.p = p\n def forward(self, x, sparsify_input_alphas=None):\n \"\"\"Forward pass through the network\n\n Args:\n x: The input tensor\n sparsify_input_alphas: Whether to sparsify the alphas over the input nodes. Use `None`\n to not sparsify input alphas.\n For hierarchical alphas, `sparsify_input_alphas` should be a (float) threshold on\n the probability (i.e. between 0 and 1). 
Alphas above the threshold (and thus the\n corresponding input nodes) are kept.\n For pairwise alphas, if `sparsify_input_alphas` is larger than 0, then only the\n largest alpha is kept.\n Note that the sparsification is not be differentiable and thus cannot be used during\n training.\n\n Returns:\n The network output\n \"\"\"\n (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n ) = self._get_normalized_alphas()\n\n \n if len(self.device_ids) == 1 :\n output= self.net(\n x,\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n sparsify_input_alphas=sparsify_input_alphas,\n alpha_prune_threshold=self.alpha_prune_threshold,\n )\n return output\n\n \n # scatter x\n xs = nn.parallel.scatter(x, self.device_ids)\n # broadcast weights\n wnormal_copies = broadcast_list(weights_normal, self.device_ids)\n wreduce_copies = broadcast_list(weights_reduce, self.device_ids)\n if weights_in_normal is not None:\n wnormal_in_copies = broadcast_list(weights_in_normal, self.device_ids)\n wreduce_in_copies = broadcast_list(weights_in_reduce, self.device_ids)\n else:\n \n wnormal_in_copies = None\n wreduce_in_copies = None\n\n if weights_pw_normal is not None:\n wnormal_pw_copies = broadcast_list(weights_pw_normal, self.device_ids)\n wreduce_pw_copies = broadcast_list(weights_pw_reduce, self.device_ids)\n else:\n wnormal_pw_copies = None\n wreduce_pw_copies = None\n\n # replicate modules\n replicas = nn.parallel.replicate(self.net, self.device_ids)\n outputs = nn.parallel.parallel_apply(\n replicas,\n list(\n zip(\n xs,\n wnormal_copies,\n wreduce_copies,\n # wnormal_in_copies,\n # wreduce_in_copies,\n # wnormal_pw_copies,\n # wreduce_pw_copies,\n )\n ),\n devices=self.device_ids,\n )\n return nn.parallel.gather(outputs, self.device_ids[0])\n\n def loss(self, X, y):\n logits = self.forward(X)\n return self.criterion(logits, y)\n\n def print_alphas(self, logger):\n # remove formats\n org_formatters = []\n for handler in logger.handlers:\n org_formatters.append(handler.formatter)\n handler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n normalizer = self.get_normalizer(deterministic=True)\n logger.info(\"####### ALPHA #######\")\n logger.info(\"# Alpha - normal\")\n for alpha in self.alpha_normal:\n logger.info(normalizer(alpha))\n\n logger.info(\"\\n# Alpha - reduce\")\n for alpha in self.alpha_reduce:\n logger.info(normalizer(alpha))\n logger.info(\"#####################\")\n\n # restore formats\n for handler, formatter in zip(logger.handlers, org_formatters):\n handler.setFormatter(formatter)\n\n def genotype(self):\n if self.use_pairwise_input_alphas:\n\n weights_pw_normal = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_reduce\n ]\n\n gene_normal = gt.parse_pairwise(\n self.alpha_normal, weights_pw_normal, primitives=self.primitives\n )\n gene_reduce = gt.parse_pairwise(\n self.alpha_reduce, weights_pw_reduce, primitives=self.primitives\n )\n\n elif self.use_hierarchical_alphas:\n raise NotImplementedError\n else:\n\n gene_normal = gt.parse(self.alpha_normal, k=2, primitives=self.primitives)\n gene_reduce = gt.parse(self.alpha_reduce, k=2, primitives=self.primitives)\n\n concat = range(2, 2 + self.n_nodes) # concat all intermediate nodes\n\n return gt.Genotype(\n normal=gene_normal,\n normal_concat=concat,\n reduce=gene_reduce,\n reduce_concat=concat,\n )\n\n def 
weights(self):\n return self.net.parameters()\n\n def named_weights(self):\n return self.net.named_parameters()\n\n def named_weights_with_net(self):\n return self.named_parameters()\n\n def alphas(self):\n for n, p in self._alphas:\n yield p\n\n def named_alphas(self):\n for n, p in self._alphas:\n yield n, p" }, { "identifier": "SearchCNNControllerPC", "path": "models/search_cnn_PC.py", "snippet": "class SearchCNNControllerPC(nn.Module):\n \"\"\" SearchCNN controller supporting multi-gpu \"\"\"\n\n def __init__(\n self,\n C_in,\n C,\n n_classes,\n n_layers,\n n_nodes=4,\n reduction_layers=[],\n stem_multiplier=3,\n device_ids=None,\n normalizer=dict(),\n PRIMITIVES=None,\n feature_scale_rate=2,\n use_hierarchical_alphas=False, # deprecated\n use_pairwise_input_alphas=False,\n use_pc_adaptation=False,\n alpha_prune_threshold=0.0,\n ):\n super().__init__()\n self.n_nodes = n_nodes\n self.criterion = nn.CrossEntropyLoss()\n self.use_pairwise_input_alphas = use_pairwise_input_alphas\n self.use_hierarchical_alphas = use_hierarchical_alphas\n self.alpha_prune_threshold = alpha_prune_threshold\n self.use_pc_adaptation = use_pc_adaptation\n if \"name\" not in normalizer.keys():\n normalizer[\"func\"] = SoftMax\n normalizer[\"params\"] = dict()\n normalizer[\"params\"][\"temp_anneal_mode\"] = None\n elif normalizer[\"name\"] == \"softmax\":\n normalizer[\"func\"] = SoftMax\n elif normalizer[\"name\"] == \"relusoftmax\":\n normalizer[\"func\"] = ReLUSoftMax\n elif normalizer[\"name\"] == \"gumbel_softmax\":\n normalizer[\"func\"] = GumbelSoftMax\n else:\n raise RuntimeError(f\"Unknown normalizer {normalizer['name']}\")\n self.normalizer = normalizer\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n self.device_ids = device_ids\n\n # initialize architect parameters: alphas\n if PRIMITIVES is None:\n PRIMITIVES = gt.PRIMITIVES\n\n self.primitives = PRIMITIVES\n n_ops = len(PRIMITIVES)\n\n self.alpha_normal = nn.ParameterList()\n self.alpha_reduce = nn.ParameterList()\n\n\n self.pc_beta_normal = nn.ParameterList()\n self.pc_beta_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n # create alpha parameters over parallel operations\n self.alpha_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n self.alpha_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n\n assert not (\n use_hierarchical_alphas and use_pairwise_input_alphas\n ), \"Hierarchical and pairwise alphas exclude each other.\"\n\n self.alpha_pw_normal = None\n self.alpha_pw_reduce = None\n self.alpha_in_normal = None\n self.alpha_in_reduce = None\n self.pc_alpha_normal = None\n self.pc_alpha_reduce = None \n\n if use_hierarchical_alphas: # deprecated\n # create alpha parameters the different input nodes for a cell, i.e. 
for each node in a\n # cell an additional distribution over the input nodes is introduced\n print(\"Using hierarchical alphas.\")\n\n self.alpha_in_normal = nn.ParameterList()\n self.alpha_in_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n self.alpha_in_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n self.alpha_in_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n\n elif use_pairwise_input_alphas:\n print(\"Using pairwise input alphas.\")\n\n self.alpha_pw_normal = nn.ParameterList()\n self.alpha_pw_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n num_comb = int(scipy.special.binom(i + 2, 2))\n self.alpha_pw_normal.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n self.alpha_pw_reduce.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n \n if use_pc_adaptation:\n # initialize pc_beta here\n # beta have to be [[2],[3],[4]]\n self.pc_alpha_normal = nn.ParameterList()\n self.pc_alpha_reduce = nn.ParameterList()\n for i in range(n_nodes):\n num_edges = i + 2\n self.pc_alpha_normal.append(nn.Parameter(1e-3 * torch.randn(num_edges)))\n self.pc_alpha_reduce.append(nn.Parameter(1e-3 * torch.randn(num_edges)))\n\n\n # setup alphas list\n self._alphas = []\n for n, p in self.named_parameters():\n if \"alpha\" in n:\n self._alphas.append((n, p))\n\n self.net = SearchCNNPC(\n C_in,\n C,\n n_classes,\n n_layers,\n n_nodes,\n reduction_layers,\n stem_multiplier,\n PRIMITIVES=self.primitives,\n feature_scale_rate=feature_scale_rate,\n )\n\n def apply_normalizer(self, alpha):\n return self.normalizer[\"func\"](alpha, self.normalizer[\"params\"])\n\n def _get_normalized_alphas(self):\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = None\n weights_pw_reduce = None\n weights_in_normal = None\n weights_in_reduce = None\n weights_pc_normal = None\n weights_pc_reduce = None\n\n if self.alpha_in_normal is not None:\n weights_in_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_normal\n ]\n weights_in_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_reduce\n ]\n elif self.alpha_pw_normal is not None:\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n if self.pc_alpha_normal is not None:\n weights_pc_normal = [\n self.apply_normalizer(alpha) for alpha in self.pc_alpha_normal\n ]\n weights_pc_reduce = [\n self.apply_normalizer(alpha) for alpha in self.pc_alpha_reduce\n ]\n return (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n weights_pc_normal,\n weights_pc_reduce,\n )\n\n def prune_alphas(self, prune_threshold=0.0, val=-10e8):\n \"\"\"Set the alphas with probability below prune_threshold to a large negative value\n\n Note:\n The prune_threshold applies to the alpha probabilities (after the softmax is\n applied) while `val` corresponds to the logit values (thus a large negative value\n corresponds to a low probability).\n \"\"\"\n\n # reset temperature for prunning\n model_has_normalizer = hasattr(self, \"normalizer\")\n if model_has_normalizer:\n curr_step_backup = self.normalizer[\"params\"][\"curr_step\"]\n self.normalizer[\"params\"][\"curr_step\"] = (\n self.normalizer[\"params\"][\"max_steps\"] - 1\n )\n\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n 
weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n for idx in range(len(weights_normal)):\n # need to modify data because alphas are leaf variables\n self.alpha_normal[idx].data[weights_normal[idx] < prune_threshold] = val\n self.alpha_reduce[idx].data[weights_reduce[idx] < prune_threshold] = val\n\n # set curr_step back to original value\n self.normalizer[\"params\"][\"curr_step\"] = curr_step_backup\n\n def get_sparse_alphas_pw(self, alpha_prune_threshold=0.0):\n\n \"\"\"\n Convert alphas to zero-one-vectors under consideration of pairwise alphas\n\n\n :param alpha_prune_threshold: threshold for pruning\n\n :return: binary tensors with shape like alpha_normal and alpha_reduce, indicating whether an op is included in the\n sparsified one shot model\n \"\"\"\n\n assert (\n self.alpha_pw_normal is not None\n ), \"Error: function only availaible for pw models\"\n\n weights_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_normal\n ] # get normalized weights\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n\n weights_normal_sparse = list()\n\n # get all the pairs of inputs\n for node_idx, node_weights in enumerate(weights_normal):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_normal[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_normal[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_normal_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n ### same for reduction\n\n weights_reduce_sparse = list()\n\n for node_idx, node_weights in enumerate(weights_reduce):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_reduce[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_reduce[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_reduce_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n return weights_normal_sparse, weights_reduce_sparse\n\n def get_sparse_num_params(self, alpha_prune_threshold=0.0):\n \"\"\"Get number of parameters for sparse one-shot-model\n\n Returns:\n A torch tensor\n \"\"\"\n\n 
weights_normal, weights_reduce = self.get_sparse_alphas_pw(\n alpha_prune_threshold\n )\n # this returns tensors with only 0's and 1's depending on whether an op is used in the sparsified model\n\n # get none active ops/layer names\n\n # for normal cell\n none_active_ops_normal = list()\n for node_idx, node in enumerate(weights_normal):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_normal.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n # and for reduction cell\n none_active_ops_reduce = list()\n for node_idx, node in enumerate(weights_reduce):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_reduce.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n all_params = sum(\n p.numel() for p in self.net.parameters()\n ) # params of one-shot model\n\n # get normal and reduction layers\n normal_cells = list()\n red_cells = list()\n for lyr, cell in enumerate(self.net.cells):\n if cell.reduction:\n red_cells.append(lyr)\n else:\n normal_cells.append(lyr)\n\n # count params of non-active ops\n\n none_active_params = 0\n for layer_name, layer_weights in self.named_parameters():\n # check if layer is part of normal or reduction cell\n if \"net.cells.\" in layer_name: # layer part of cells at all?\n for cell in normal_cells: # normal cell?\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_normal\n\n # else reduction cell\n for cell in red_cells:\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_reduce\n\n if any(\n [none_active_op in layer_name for none_active_op in none_active_ops]\n ): # check if layer is part of none-active ops\n none_active_params += layer_weights.numel()\n\n active_params = all_params - none_active_params\n\n return active_params\n\n def drop_path_prob(self, p):\n \"\"\" Set drop path probability \"\"\"\n for module in self.net.modules():\n if isinstance(module, ops_7c.DropPath_):\n module.p = p\n\n def forward(self, x, sparsify_input_alphas=None):\n \"\"\"Forward pass through the network\n\n Args:\n x: The input tensor\n sparsify_input_alphas: Whether to sparsify the alphas over the input nodes. Use `None`\n to not sparsify input alphas.\n For hierarchical alphas, `sparsify_input_alphas` should be a (float) threshold on\n the probability (i.e. between 0 and 1). 
Alphas above the threshold (and thus the\n corresponding input nodes) are kept.\n For pairwise alphas, if `sparsify_input_alphas` is larger than 0, then only the\n largest alpha is kept.\n Note that the sparsification is not be differentiable and thus cannot be used during\n training.\n\n Returns:\n The network output\n \"\"\"\n\n (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n weights_pc_normal,\n weights_pc_reduce,\n ) = self._get_normalized_alphas()\n\n if len(self.device_ids) == 1:\n return self.net(\n x,\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n weights_pc_normal,\n weights_pc_reduce,\n sparsify_input_alphas=sparsify_input_alphas,\n alpha_prune_threshold=self.alpha_prune_threshold,\n )\n\n # scatter x\n xs = nn.parallel.scatter(x, self.device_ids)\n # broadcast weights\n wnormal_copies = broadcast_list(weights_normal, self.device_ids)\n wreduce_copies = broadcast_list(weights_reduce, self.device_ids)\n\n if weights_in_normal is not None:\n wnormal_in_copies = broadcast_list(weights_in_normal, self.device_ids)\n wreduce_in_copies = broadcast_list(weights_in_reduce, self.device_ids)\n else:\n wnormal_in_copies = None\n wreduce_in_copies = None\n\n if weights_pw_normal is not None:\n wnormal_pw_copies = broadcast_list(weights_pw_normal, self.device_ids)\n wreduce_pw_copies = broadcast_list(weights_pw_reduce, self.device_ids)\n else:\n wnormal_pw_copies = None\n wreduce_pw_copies = None\n\n # replicate modules\n replicas = nn.parallel.replicate(self.net, self.device_ids)\n outputs = nn.parallel.parallel_apply(\n replicas,\n list(\n zip(\n xs,\n wnormal_copies,\n wreduce_copies,\n wnormal_in_copies,\n wreduce_in_copies,\n wnormal_pw_copies,\n wreduce_pw_copies,\n )\n ),\n devices=self.device_ids,\n )\n return nn.parallel.gather(outputs, self.device_ids[0])\n\n def loss(self, X, y):\n logits = self.forward(X)\n return self.criterion(logits, y)\n\n def print_alphas(self, logger):\n # remove formats\n org_formatters = []\n for handler in logger.handlers:\n org_formatters.append(handler.formatter)\n handler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n normalizer = self.get_normalizer(deterministic=True)\n logger.info(\"####### ALPHA #######\")\n logger.info(\"# Alpha - normal\")\n for alpha in self.alpha_normal:\n logger.info(normalizer(alpha))\n\n logger.info(\"\\n# Alpha - reduce\")\n for alpha in self.alpha_reduce:\n logger.info(normalizer(alpha))\n logger.info(\"#####################\")\n\n # restore formats\n for handler, formatter in zip(logger.handlers, org_formatters):\n handler.setFormatter(formatter)\n\n def genotype(self):\n if self.use_pairwise_input_alphas:\n\n weights_pw_normal = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_reduce\n ]\n\n gene_normal = gt.parse_pairwise(\n self.alpha_normal, weights_pw_normal, primitives=self.primitives\n )\n gene_reduce = gt.parse_pairwise(\n self.alpha_reduce, weights_pw_reduce, primitives=self.primitives\n )\n\n elif self.use_hierarchical_alphas:\n raise NotImplementedError\n else:\n\n gene_normal = gt.parse(self.alpha_normal, k=2, primitives=self.primitives)\n gene_reduce = gt.parse(self.alpha_reduce, k=2, primitives=self.primitives)\n\n concat = range(2, 2 + self.n_nodes) # concat all intermediate nodes\n\n return gt.Genotype(\n normal=gene_normal,\n normal_concat=concat,\n 
reduce=gene_reduce,\n reduce_concat=concat,\n )\n\n def weights(self):\n return self.net.parameters()\n\n def named_weights(self):\n return self.net.named_parameters()\n\n def named_weights_with_net(self):\n return self.named_parameters()\n\n def alphas(self):\n for n, p in self._alphas:\n yield p\n\n def named_alphas(self):\n for n, p in self._alphas:\n yield n, p" }, { "identifier": "Darts", "path": "task_optimizer/darts.py", "snippet": "class Darts:\n def __init__(self, model, config, do_schedule_lr=False):\n\n self.config = config\n self.config.logger = None\n self.model = model\n self.do_schedule_lr = do_schedule_lr\n self.task_train_steps = config.task_train_steps\n self.test_task_train_steps = config.test_task_train_steps\n self.warm_up_epochs = config.warm_up_epochs\n self.eval_switch = 0\n self.pprevious_grads = 0\n # weights optimizer\n\n self.w_optim = torch.optim.Adam(\n self.model.weights(),\n lr=self.config.w_lr,\n betas=(0.0, 0.999), # config.w_momentum,\n weight_decay=self.config.w_weight_decay,\n ) #\n\n # architecture optimizer\n self.a_optim = torch.optim.Adam(\n model.alphas(),\n self.config.alpha_lr,\n betas=(0.0, 0.999),\n weight_decay=self.config.alpha_weight_decay,\n )\n self.architect = Architect(\n self.model,\n self.config.w_momentum,\n self.config.w_weight_decay,\n self.config.use_first_order_darts,\n )\n def step(\n self,\n task,\n epoch,\n global_progress=\"\",\n test_phase=False,\n alpha_logger=None,\n sparsify_input_alphas=None,\n ):\n \n\n\n log_alphas = False\n\n if test_phase:\n top1_logger = self.config.top1_logger_test\n losses_logger = self.config.losses_logger_test\n train_steps = self.config.test_task_train_steps\n arch_adap_steps = int(train_steps * self.config.test_adapt_steps)\n \n if alpha_logger is not None:\n log_alphas = True\n\n else:\n top1_logger = self.config.top1_logger\n losses_logger = self.config.losses_logger\n train_steps = self.config.task_train_steps\n arch_adap_steps = train_steps\n \n\n \n\n lr = self.config.w_lr\n\n if self.config.w_task_anneal:\n for group in self.w_optim.param_groups:\n group[\"lr\"] = self.config.w_lr\n\n w_task_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n self.w_optim, train_steps, eta_min=0.0\n )\n else:\n w_task_lr_scheduler = None\n\n if self.config.a_task_anneal:\n for group in self.a_optim.param_groups:\n group[\"lr\"] = self.config.alpha_lr\n\n a_task_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n self.a_optim, arch_adap_steps, eta_min=0.0\n )\n\n else:\n a_task_lr_scheduler = None\n\n model_has_normalizer = hasattr(self.model, \"normalizer\")\n if model_has_normalizer:\n self.model.normalizer[\"params\"][\"curr_step\"] = 0.0\n self.architect.v_net.normalizer[\"params\"][\"curr_step\"] = 0.0\n self.model.normalizer[\"params\"][\"max_steps\"] = float(arch_adap_steps)\n self.architect.v_net.normalizer[\"params\"][\"max_steps\"] = float(\n arch_adap_steps\n )\n from tqdm import tqdm\n if self.config.drop_path_prob > 0.0:\n if not test_phase or self.config.use_drop_path_in_meta_testing:\n self.model.drop_path_prob(self.config.drop_path_prob)\n\n p_bar = tqdm(range(train_steps))\n self.config.total_steps = train_steps * len(task.train_loader)\n \n\n\n for train_step in p_bar: # task train_steps = epochs per task\n warm_up = (\n epoch < self.warm_up_epochs\n ) # if epoch < warm_up_epochs, do warm up\n if (\n train_step >= arch_adap_steps\n ): # no architecture adap after arch_adap_steps steps\n warm_up = 1\n\n if w_task_lr_scheduler is not None:\n w_task_lr_scheduler.step()\n\n 
if a_task_lr_scheduler is not None:\n a_task_lr_scheduler.step()\n torch.cuda.reset_peak_memory_stats(device=0)\n \n task_specific_model = train( \n task,\n self.model,\n self.architect,\n self.w_optim,\n self.a_optim,\n lr,\n global_progress,\n self.config,\n warm_up,\n test_phase\n )\n mem = torch.cuda.memory_stats(0)['allocated_bytes.all.peak']/(1024**2)\n p_bar.set_postfix({\"Memory\" : f\"{mem : .2f}\",\"Task average\":f\"{self.config.top1_logger_test.avg:.1%}\"})\n if train_step == 9:\n self.config.memory_snap = mem\n if (\n model_has_normalizer\n and train_step < (arch_adap_steps - 1)\n and not warm_up\n ): \n self.model.normalizer[\"params\"][\"curr_step\"] += 1\n self.architect.v_net.normalizer[\"params\"][\"curr_step\"] += 1\n\n w_task = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_weight)\n for layer_name, layer_weight in self.model.named_weights()\n # if layer_weight.grad is not None\n }\n )\n a_task = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_alpha)\n for layer_name, layer_alpha in self.model.named_alphas()\n # if layer_alpha.grad is not None\n }\n )\n\n \n w_task_bot = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_weight)\n for layer_name, layer_weight in task_specific_model.named_weights()\n \n }\n )\n a_task_bot = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_alpha)\n for layer_name, layer_alpha in task_specific_model.named_alphas()\n \n }\n )\n # Log genotype\n genotype = self.model.genotype()\n\n if log_alphas:\n alpha_logger[\"normal_relaxed\"].append(\n copy.deepcopy(self.model.alpha_normal)\n )\n alpha_logger[\"reduced_relaxed\"].append(\n copy.deepcopy(self.model.alpha_reduce)\n )\n alpha_logger[\"all_alphas\"].append(a_task)\n alpha_logger[\"normal_hierarchical\"].append(\n copy.deepcopy(self.model.alpha_in_normal)\n )\n alpha_logger[\"reduced_hierarchical\"].append(\n copy.deepcopy(self.model.alpha_in_reduce)\n )\n alpha_logger[\"normal_pairwise\"].append(\n copy.deepcopy(self.model.alpha_pw_normal)\n )\n alpha_logger[\"reduced_pairwise\"].append(\n copy.deepcopy(self.model.alpha_pw_reduce)\n )\n\n # for test data evaluation, turn off drop path\n if self.config.drop_path_prob > 0.0:\n self.model.drop_path_prob(0.0)\n little_switch = 0\n\n if self.config.naivenaive:\n little_switch = 1\n with torch.no_grad():\n self.config.naivenaive = 1\n self.config.eval_switch = 1\n self.config.cell_phase = 3\n\n for batch_idx, batch in enumerate(task.test_loader):\n \n x_test, y_test = batch\n x_test = x_test.to(self.config.device, non_blocking=True)\n y_test = y_test.to(self.config.device, non_blocking=True)\n if isinstance(self.model, SearchCNNController):\n logits = self.model(\n x_test, sparsify_input_alphas=sparsify_input_alphas\n )\n else:\n logits = self.model(x_test)\n loss = self.model.criterion(logits, y_test)\n\n y_test_pred = logits.softmax(dim=1)\n now = time.strftime('%c', time.localtime(time.time()))\n prec1, prec5 = utils.accuracy(logits, y_test, self.config, topk=(1, 5))\n losses_logger.update(loss.item(), 1)\n top1_logger.update(prec1.item(), 1)\n \n self.config.naivenaive = 0 \n self.config.eval_switch = 0\n self.config.cell_phase = 3 \n\n if little_switch == 1:\n self.config.naivenaive = 1\n \n task_info = namedtuple(\n \"task_info\",\n [\n \"genotype\",\n \"top1\",\n \"w_task\",\n \"a_task\",\n \"loss\",\n \"y_test_pred\",\n \"sparse_num_params\",\n \"w_task_bot\",\n \"a_task_bot\"\n ],\n )\n task_info.w_task = w_task\n task_info.a_task = a_task\n task_info.loss = loss\n y_test_pred = y_test_pred\n task_info.y_test_pred = 
y_test_pred\n task_info.genotype = genotype\n # task_info.top1 = top1\n\n # task_info.sparse_num_params = self.model.get_sparse_num_params(\n # self.model.alpha_prune_threshold\n # )\n task_info.w_task_bot = w_task_bot\n task_info.a_task_bot = a_task_bot\n\n return task_info" }, { "identifier": "Architect", "path": "task_optimizer/darts.py", "snippet": "class Architect:\n \"\"\" Compute gradients of alphas \"\"\"\n\n def __init__(self, net, w_momentum, w_weight_decay, use_first_order_darts):\n \"\"\"\n Args:\n net\n w_momentum: weights momentum\n \"\"\"\n self.net = net\n self.v_net = copy.deepcopy(net)\n self.w_momentum = w_momentum\n self.w_weight_decay = w_weight_decay\n self.use_first_order_darts = use_first_order_darts\n self.pprevious_grads = list()\n \n\n def virtual_step(self, train_X, train_y, xi, w_optim):\n \"\"\"\n Compute unrolled weight w' (virtual step)\n\n Step process:\n 1) forward\n 2) calc loss\n 3) compute gradient (by backprop)\n 4) update gradient\n\n Args:\n xi: learning rate for virtual gradient step (same as weights lr)\n w_optim: weights optimizer\n \"\"\"\n # forward & calc loss\n loss = self.net.loss(train_X, train_y) # L_train(w)\n\n # compute gradient\n gradients = torch.autograd.grad(loss, self.net.weights())\n\n \n \n\n\n\n \n # do virtual step (update gradient)\n # below operations do not need gradient tracking\n with torch.no_grad():\n # dict key is not the value, but the pointer. So original network weight have to\n # be iterated also.\n for w, vw, g in zip(self.net.weights(), self.v_net.weights(), gradients):\n m = w_optim.state[w].get(\"momentum_buffer\", 0.0) * self.w_momentum\n vw.copy_(w - xi * (m + g + self.w_weight_decay * w))\n\n # synchronize alphas\n for a, va in zip(self.net.alphas(), self.v_net.alphas()):\n va.copy_(a)\n\n def backward(self, train_X, train_y, val_X, val_y, xi, w_optim):\n \"\"\"Compute loss and backward its gradients\n Args:\n xi: learning rate for virtual gradient step (same as net lr)\n w_optim: weights optimizer - for virtual step\n \"\"\"\n # calc unrolled loss\n loss = self.v_net.loss(val_X, val_y) # L_val(w`)\n # compute gradient\n v_alphas = tuple(self.v_net.alphas())\n v_weights = tuple(self.v_net.weights())\n v_grads = torch.autograd.grad(loss, v_alphas + v_weights, allow_unused=True)\n dalpha = v_grads[: len(v_alphas)]\n dw = v_grads[len(v_alphas) :]\n\n \n\n if self.use_first_order_darts: # use first oder approximation for darts\n \n with torch.no_grad():\n for alpha, da in zip(self.net.alphas(), dalpha):\n alpha.grad = da\n \n\n else: # 2nd order DARTS\n\n hessian = self.compute_hessian(dw, train_X, train_y)\n\n # update final gradient = dalpha - xi*hessian\n with torch.no_grad():\n for alpha, da, h in zip(self.net.alphas(), dalpha, hessian):\n alpha.grad = da - xi * h\n\n\n\n\n def partial_alpha_backward(self,config, train_X, train_y, val_X, val_y, xi, w_optim):\n \"\"\"Compute loss and backward its gradients\n Args:\n \n xi: learning rate for virtual gradient step (same as net lr)\n w_optim: weights optimizer - for virtual step\n \"\"\"\n # compute gradient\n grad_output_sum = copy.deepcopy(self.v_net.net.config.alpha_previous_grad)\n \n if config.residual_flag == 1:\n pprevious_grad = copy.deepcopy(self.v_net.net.config.alpha_pprevious_grad)\n self.pprevious_grads.append(pprevious_grad) \n \n latent = self.v_net(val_X)\n\n\n v_alphas = tuple(self.v_net.alphas())\n v_weights = tuple(self.v_net.weights())\n\n if config.residual_flag == 1:\n try:\n if self.v_net.net.config.cell_phase == 1:\n grad_output_sum = 
torch.add(self.pprevious_grads[0],grad_output_sum)\n\n elif self.v_net.net.config.cell_phase == 0:\n grad_output_sum = torch.add(self.pprevious_grads[1],grad_output_sum)\n except:\n print(f\"Shape error,{grad_output_sum.shape} was the desired shape but you got {self.pprevious_grads[0].shape} or {self.pprevious_grads[1].shape}.\")\n print(\"Bypassing residual flag.\")\n\n v_grads = torch.autograd.grad(latent, v_alphas + v_weights, grad_outputs=grad_output_sum, allow_unused=True) \n dalpha = v_grads[: len(v_alphas)]\n dw = v_grads[len(v_alphas) :]\n \n \n\n if self.use_first_order_darts: # use first oder approximation for darts\n \n with torch.no_grad():\n for alpha, da in zip(self.net.alphas(), dalpha):\n if alpha.grad is not None and da is not None:\n alpha.grad.data.add_(da)\n else:\n alpha.grad= da\n\n else: # 2nd order DARTS\n\n hessian = self.compute_hessian(dw, train_X, train_y)\n\n # update final gradient = dalpha - xi*hessian\n with torch.no_grad():\n for alpha, da, h in zip(self.net.alphas(), dalpha, hessian):\n alpha.grad = da - xi * h\n\n def compute_hessian(self, dw, train_X, train_y):\n \"\"\"\n dw = dw` { L_val(w`, alpha) }\n w+ = w + eps * dw\n w- = w - eps * dw\n hessian = (dalpha { L_train(w+, alpha) } - dalpha { L_train(w-, alpha) }) / (2*eps)\n eps = 0.01 / ||dw||\n \"\"\"\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = 0.01 / norm\n \n # w+ = w + eps*dw`\n with torch.no_grad():\n for p, d in zip(self.net.weights(), dw):\n p += eps * d\n\n # dalpha { L_train(w+) }\n loss = self.net.loss(train_X, train_y)\n dalpha_pos = torch.autograd.grad(loss, self.net.alphas())\n\n # w- = w - eps*dw`\n with torch.no_grad():\n for p, d in zip(self.net.weights(), dw):\n p -= 2.0 * eps * d\n\n # dalpha { L_train(w-) }\n loss = self.net.loss(train_X, train_y)\n dalpha_neg = torch.autograd.grad(loss, self.net.alphas())\n\n # recover w\n with torch.no_grad():\n for p, d in zip(self.net.weights(), dw):\n p += eps * d\n\n hessian = [(p - n) / 2.0 * eps for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian" }, { "identifier": "train", "path": "task_optimizer/darts.py", "snippet": "def train(\n task,\n model,\n architect,\n w_optim,\n alpha_optim,\n lr,\n global_progress,\n config,\n warm_up=False,\n test_phase = False\n):\n model.train()\n pprevious_grads = list()\n initial_model = copy.deepcopy(model)\n \n p_bar_monitor = (enumerate(zip(task.train_loader, task.valid_loader)))#\n for step, ((train_X, train_y), (val_X, val_y)) in p_bar_monitor:\n\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n start.record()\n \n train_X, train_y = train_X.to(config.device), train_y.to(config.device)\n val_X, val_y = val_X.to(config.device), val_y.to(config.device)\n N = train_X.size(0)\n initial_alpha = [copy.deepcopy(x).detach().cpu() for x in model.alphas()]\n \n if config.light_exp == 1:\n\n if config.meta_model != \"pc_adaptation\" and config.meta_model != \"pure_darts\" and config.dataset != \"cifar10\" and config.dataset != \"cifar100\":\n config.cell_phase = config.layers -1\n architect.v_net.net.config.cell_phase = config.layers -1\n # phase 2. 
architect step (alpha)\n prohibited_list = config.prohibited_list\n if config.naivenaive != 1 and config.eval_switch != 1 and config.meta_model != \"pc_adaptation\" and config.meta_model != \"pure_darts\" and config.dataset not in prohibited_list:\n\n w_optim.zero_grad()\n alpha_optim.zero_grad()\n train_X, train_y = train_X.chunk(config.split_num), train_y.chunk(config.split_num)\n val_X,val_y = val_X.chunk(config.split_num), val_y.chunk(config.split_num)\n \n for (train_X_chunk, train_y_chunk) ,(val_X_chunk,val_y_chunk) in zip(zip(train_X,train_y),zip(val_X,val_y)):\n config.cell_phase = config.layers -1\n architect.v_net.net.config.cell_phase = config.layers -1\n for phase in range(config.layers):\n \n if not warm_up: # only update alphas outside warm up phase\n if config.do_unrolled_architecture_steps:\n architect.virtual_step(train_X_chunk, train_y_chunk, lr, w_optim) # (calc w`)\n \n if config.cell_phase == config.layers -1:\n architect.v_net.net.cells[config.cell_phase].alpha_switch = 1 \n architect.backward(train_X_chunk, train_y_chunk, val_X_chunk, val_y_chunk, lr, w_optim)\n \n \n else:\n architect.v_net.net.cells[config.cell_phase].alpha_switch = 1\n architect.partial_alpha_backward(config, train_X_chunk, train_y_chunk, val_X_chunk, val_y_chunk, lr, w_optim) \n \n \n model.net.alpha_switch = 0\n architect.v_net.net.alpha_switch = 0\n\n # phase 1. child network step (w)\n if config.cell_phase == config.layers -1:\n w_optim.zero_grad()\n logits = model(train_X_chunk)\n loss = model.criterion(logits, train_y_chunk)\n loss_monitor = loss.item()\n loss.backward()\n nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip) \n w_optim.step()\n\n\n else:\n w_optim.zero_grad()\n output_grad_sum = copy.deepcopy(config.previous_grad)\n pprevious_grad = copy.deepcopy(config.pprevious_grad)\n pprevious_grads.append(pprevious_grad)\n\n if config.residual_flag == 1:\n if config.cell_phase == 1:\n if pprevious_grads[0].shape != output_grad_sum.shape:\n output_grad_sum = output_grad_sum\n else:\n output_grad_sum = torch.add(pprevious_grads[0],output_grad_sum)\n elif config.cell_phase == 0:\n if pprevious_grads[1].shape != output_grad_sum.shape:\n output_grad_sum = output_grad_sum\n else:\n output_grad_sum = torch.add(pprevious_grads[1],output_grad_sum)\n latent = model(train_X_chunk)\n\n\n \n try:\n latent.backward(output_grad_sum)\n \n except:\n if output_grad_sum is not None:\n print(\"batch passed,\",output_grad_sum.shape, \" was the shape of grad saved\")\n print(\"what we had to save was this shape, \", latent.shape )\n print(f\"And this was the phase.{config.cell_phase} what can be the problem here ? \")\n else:\n print(\"output was none. 
Why?\")\n pass\n nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip)\n \n\n \n config.cell_phase -= 1\n architect.v_net.net.config.cell_phase -= 1\n alpha_optim.step() \n w_optim.step()\n \n\n \n \n \n\n else:\n if not warm_up: # only update alphas outside warm up phase\n alpha_optim.zero_grad()\n \n if config.do_unrolled_architecture_steps:\n architect.virtual_step(train_X, train_y, lr, w_optim) # (calc w`)\n \n architect.backward(train_X, train_y, val_X, val_y, lr, w_optim)\n alpha_optim.step()\n \n\n \n w_optim.zero_grad()\n \n logits = model(train_X)\n \n loss = model.criterion(logits, train_y)\n loss.backward()\n nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip)\n w_optim.step()\n\n \n \n\n\n end.record()\n torch.cuda.synchronize()\n config.computing_time += start.elapsed_time(end)\n \n config.total_steps -= 1\n pprevious_grads = list()\n architect.pprevious_grads = list()\n \n if config.alpha_expect and config.meta_model != 'pc_adaptation':\n if len(config.alpha_grad_footprints) <= 5:\n\n learnt_alpha = [copy.deepcopy(x).detach().cpu() for x in model.alphas()]\n alpha_grad = _alpha_subtract(initial_alpha,learnt_alpha)\n config.alpha_grad_footprints.append(alpha_grad) \n\n\n else:\n \n learnt_alpha = [copy.deepcopy(x).detach().cpu() for x in model.alphas()]\n alpha_grad = _alpha_subtract(initial_alpha,learnt_alpha)\n \n config.alpha_grad_footprints.pop(0) \n config.alpha_grad_footprints.append(alpha_grad) \n\n config.alpha_sample_metrics = _exp_alpha_metric(initial_alpha,config)\n architect.v_net.net.config.alpha_sample_metrics = config.alpha_sample_metrics\n\n ###################################################################################\n\n\n task_specific_model = copy.deepcopy(model)\n task_specific_model = get_diff_for_const_bottom(initial_model,task_specific_model)\n \n return task_specific_model" } ]
import os
import torch
import torch.nn as nn
import numpy as np
import utils.utils as utils
import random
import time
import pandas as pd
import copy
import argparse
from utils import genotypes as gt
from models.search_cnn import SearchCNNController
from models.search_cnn_PC import SearchCNNControllerPC
from task_optimizer.darts import Darts,Architect
from task_optimizer.darts import train as d_train
from tqdm import tqdm
15,692
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, alpha_prune_threshold=config.alpha_prune_threshold, ) if config.meta_model == 'pc_adaptation': print("model created as PC adaptation")
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, alpha_prune_threshold=config.alpha_prune_threshold, ) if config.meta_model == 'pc_adaptation': print("model created as PC adaptation")
model = SearchCNNControllerPC(
2
2023-10-08 02:42:27+00:00
24k
LukeForeverYoung/UReader
serve/model_worker.py
[ { "identifier": "IO", "path": "serve/io_utils.py", "snippet": "class IO:\n @staticmethod\n def register(options):\n pass\n\n def open(self, path: str, mode: str):\n raise NotImplementedError\n\n def exists(self, path: str) -> bool:\n raise NotImplementedError\n\n def move(self, src: str, dst: str):\n raise NotImplementedError\n\n def copy(self, src: str, dst: str):\n raise NotImplementedError\n\n def makedirs(self, path: str, exist_ok=True):\n raise NotImplementedError\n\n def remove(self, path: str):\n raise NotImplementedError\n\n def listdir(self, path: str, recursive=False, full_path=False, contains=None):\n raise NotImplementedError\n\n def isdir(self, path: str) -> bool:\n raise NotImplementedError\n\n def isfile(self, path: str) -> bool:\n raise NotImplementedError\n\n def abspath(self, path: str) -> str:\n raise NotImplementedError\n\n def last_modified(self, path: str) -> datetime:\n raise NotImplementedError\n\n def md5(self, path: str) -> str:\n hash_md5 = hashlib.md5()\n with self.open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b''):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n re_remote = re.compile(r'(oss|https?)://')\n\n def islocal(self, path: str) -> bool:\n return not self.re_remote.match(path.lstrip())" }, { "identifier": "DefaultIO", "path": "serve/io_utils.py", "snippet": "class DefaultIO(IO):\n __name__ = 'DefaultIO'\n\n def _check_path(self, path):\n if not self.islocal(path):\n raise RuntimeError(\n 'Credentials must be provided to use oss path. '\n 'Make sure you have created \"user/modules/oss_credentials.py\" according to ReadMe.')\n\n def open(self, path, mode='r'):\n self._check_path(path)\n path = self.abspath(path)\n return open(path, mode=mode)\n\n def exists(self, path):\n self._check_path(path)\n path = self.abspath(path)\n return os.path.exists(path)\n\n def move(self, src, dst):\n self._check_path(src)\n self._check_path(dst)\n src = self.abspath(src)\n dst = self.abspath(dst)\n shutil.move(src, dst)\n\n def copy(self, src, dst):\n self._check_path(src)\n self._check_path(dst)\n src = self.abspath(src)\n dst = self.abspath(dst)\n try:\n shutil.copyfile(src, dst)\n except shutil.SameFileError:\n pass\n\n def makedirs(self, path, exist_ok=True):\n self._check_path(path)\n path = self.abspath(path)\n os.makedirs(path, exist_ok=exist_ok)\n\n def remove(self, path):\n self._check_path(path)\n path = self.abspath(path)\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n def listdir(self, path, recursive=False, full_path=False, contains=None):\n self._check_path(path)\n path = self.abspath(path)\n contains = contains or ''\n if recursive:\n files = (os.path.join(dp, f) if full_path else f for dp, dn, fn in os.walk(path) for f in fn)\n files = [file for file in files if contains in file]\n else:\n files = os.listdir(path)\n if full_path:\n files = [os.path.join(path, file) for file in files if contains in file]\n return files\n\n def isdir(self, path):\n return os.path.isdir(path)\n\n def isfile(self, path):\n return os.path.isfile(path)\n\n def abspath(self, path):\n return os.path.abspath(path)\n\n def last_modified(self, path):\n return datetime.fromtimestamp(os.path.getmtime(path))" }, { "identifier": "OSS", "path": "serve/io_utils.py", "snippet": "class OSS(DefaultIO):\n \"Mixed IO module to support both system-level and OSS IO methods\"\n __name__ = 'OSS'\n\n def __init__(self, access_key_id: str, access_key_secret: str, region_bucket: List[List[str]]):\n \"\"\"\n the value of \"region_bucket\" 
should be something like [[\"cn-hangzhou\", \"<yourBucketName>\"], [\"cn-zhangjiakou\", \"<yourBucketName>\"]],\n specifying your buckets and corresponding regions\n \"\"\"\n from oss2 import Auth, Bucket, ObjectIterator\n super().__init__()\n self.ObjectIterator = ObjectIterator\n self.auth = Auth(access_key_id, access_key_secret)\n self.buckets = {\n bucket_name: Bucket(self.auth, f'http://oss-{region}.aliyuncs.com', bucket_name)\n for region, bucket_name in region_bucket\n }\n self.oss_pattern = re.compile(r'oss://([^/]+)/(.+)')\n\n def _split_name(self, path):\n m = self.oss_pattern.match(path)\n if not m:\n raise IOError(f'invalid oss path: \"{path}\", should be \"oss://<bucket_name>/path\"')\n bucket_name, path = m.groups()\n return bucket_name, path\n\n def _split(self, path):\n bucket_name, path = self._split_name(path)\n try:\n bucket = self.buckets[bucket_name]\n except KeyError:\n raise IOError(f'Bucket {bucket_name} not registered in oss_credentials.py')\n return bucket, path\n\n def open(self, full_path, mode='r'):\n if not full_path.startswith('oss://'):\n return super().open(full_path, mode)\n\n bucket, path = self._split(full_path)\n with mute_stderr():\n path_exists = bucket.object_exists(path)\n if 'w' in mode:\n if path_exists:\n bucket.delete_object(path)\n if 'b' in mode:\n return BinaryOSSFile(bucket, path)\n return OSSFile(bucket, path)\n elif mode == 'a':\n position = bucket.head_object(path).content_length if path_exists else 0\n return OSSFile(bucket, path, position=position)\n else:\n if not path_exists:\n raise FileNotFoundError(full_path)\n obj = bucket.get_object(path)\n # auto cache large files to avoid memory issues\n # if obj.content_length > 30 * 1024 ** 2: # 30M\n # from da.utils import cache_file\n # path = cache_file(full_path)\n # return super().open(path, mode)\n if mode == 'rb':\n # TODO for a large file, this will load the whole file into memory\n return NullContextWrapper(BytesIO(obj.read()))\n else:\n assert mode == 'r'\n return NullContextWrapper(StringIO(obj.read().decode()))\n\n def exists(self, path):\n if not path.startswith('oss://'):\n return super().exists(path)\n\n bucket, _path = self._split(path)\n # if file exists\n exists = self._file_exists(bucket, _path)\n # if directory exists\n if not exists:\n try:\n self.listdir(path)\n exists = True\n except FileNotFoundError:\n pass\n return exists\n\n def _file_exists(self, bucket, path):\n with mute_stderr():\n return bucket.object_exists(path)\n\n def move(self, src, dst):\n if not src.startswith('oss://') and not dst.startswith('oss://'):\n return super().move(src, dst)\n self.copy(src, dst)\n self.remove(src)\n\n def copy(self, src, dst):\n cloud_src = src.startswith('oss://')\n cloud_dst = dst.startswith('oss://')\n if not cloud_src and not cloud_dst:\n return super().copy(src, dst)\n\n # download\n if cloud_src and not cloud_dst:\n bucket, src = self._split(src)\n obj = bucket.get_object(src)\n if obj.content_length > 100 * 1024 ** 2: # 100M\n from tqdm import tqdm\n progress = None\n\n def callback(i, n):\n nonlocal progress\n if progress is None:\n progress = tqdm(total=n, unit='B', unit_scale=True, unit_divisor=1024, leave=False,\n desc=f'downloading')\n progress.update(i - progress.n)\n\n bucket.get_object_to_file(src, dst, progress_callback=callback)\n if progress is not None:\n progress.close()\n else:\n bucket.get_object_to_file(src, dst)\n return\n bucket, dst = self._split(dst)\n # upload\n if cloud_dst and not cloud_src:\n bucket.put_object_from_file(dst, src)\n return\n # 
copy between oss paths\n if src != dst:\n src_bucket_name, src = self._split_name(src)\n bucket.copy_object(src_bucket_name, src, dst)\n # TODO: support large file copy\n # https://help.aliyun.com/document_detail/88465.html?spm=a2c4g.11174283.6.882.4d157da2mgp3xc\n\n def listdir(self, path, recursive=False, full_path=False, contains=None):\n if not path.startswith('oss://'):\n return super().listdir(path, recursive, full_path, contains)\n\n bucket, path = self._split(path)\n path = path.rstrip('/') + '/'\n files = [obj.key for obj in self.ObjectIterator(bucket, prefix=path, delimiter='' if recursive else '/')]\n try:\n files.remove(path)\n except ValueError:\n pass\n if full_path:\n files = [f'oss://{bucket.bucket_name}/{file}' for file in files]\n else:\n files = [file[len(path):] for file in files]\n if not files:\n raise FileNotFoundError(f'No such directory: oss://{bucket.bucket_name}/{path}')\n files = [file for file in files if (contains or '') in file]\n return files\n\n def remove(self, path):\n if not path.startswith('oss://'):\n return super().remove(path)\n\n if self.isfile(path):\n paths = [path]\n else:\n paths = self.listdir(path, recursive=True, full_path=True)\n for path in paths:\n bucket, path = self._split(path)\n bucket.delete_object(path)\n\n def makedirs(self, path, exist_ok=True):\n # there is no need to create directory in oss\n if not path.startswith('oss://'):\n return super().makedirs(path)\n\n def isdir(self, path):\n if not path.startswith('oss://'):\n return super().isdir(path)\n return self.exists(path.rstrip('/') + '/')\n\n def isfile(self, path):\n if not path.startswith('oss://'):\n return super().isdir(path)\n return self.exists(path) and not self.isdir(path)\n\n def abspath(self, path):\n if not path.startswith('oss://'):\n return super().abspath(path)\n return path\n\n def authorize(self, path):\n if not path.startswith('oss://'):\n raise ValueError('Only oss path can use \"authorize\"')\n import oss2\n bucket, path = self._split(path)\n bucket.put_object_acl(path, oss2.OBJECT_ACL_PUBLIC_READ)\n\n def last_modified(self, path):\n if not path.startswith('oss://'):\n return super().last_modified(path)\n bucket, path = self._split(path)\n return datetime.strptime(\n bucket.get_object_meta(path).headers['Last-Modified'],\n r'%a, %d %b %Y %H:%M:%S %Z'\n ) + timedelta(hours=8)" }, { "identifier": "MplugOwlProcessor", "path": "mplug_owl/processing_mplug_owl.py", "snippet": "class MplugOwlProcessor(ProcessorMixin):\n attributes = []\n tokenizer_class = (\"MplugOwlTokenizer\")\n\n def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n super().__init__(**kwargs)\n self.tokens_to_generate = 0\n self.image_processor = image_processor\n self.tokenizer = tokenizer\n self.add_BOS = True\n\n def __call__(self, text=None, images=None, return_tensors=None, **kwargs):\n args = get_args()\n if text is None and images is None:\n raise ValueError(\"You have to specify either text or images. 
Both cannot be none.\")\n\n if images is not None:\n if not isinstance(images, list):\n images = [images]\n # image_features, = self.image_processor(images, return_tensors=return_tensors, **kwargs)\n process_results = [self.image_processor(image=image, text=None) for image in images]\n if len(process_results)>0 and len(process_results[0][0].shape) == 4:\n # 图片被切分成了多块 默认是doc场景\n text_list = text.split('<image>')\n images = []\n patch_positions = []\n text = text_list[0]\n for ri, (image_input, text_input, patch_position) in enumerate(process_results):\n images.append(image_input)\n patch_positions.append(patch_position)\n if args.patch_pos_embed_type == 'pre':\n # 对于pre处理 v2t最终输出的是一张图的token\n text += '<image>'\n else:\n # 对于post处理 v2t最终输出的是多图\n text += '<image>'*image_input.shape[0]\n text += text_list[ri+1]\n images = torch.cat(images, dim=0)\n patch_positions = torch.cat(patch_positions, dim=0)\n else:\n # 如果没有切片 则正常stack 并创建patch position = num_image (0,0)的patch id以保持一致\n images = [_[0] for _ in process_results]\n images = torch.stack(images, dim=0)\n patch_positions = torch.zeros(images.shape[0],2).long()\n text = text\n if text is not None:\n encoding = tokenize_prompts(\n prompts=[text],\n tokens_to_generate=self.tokens_to_generate,\n add_BOS=self.add_BOS,\n tokenizer=self.tokenizer,\n ignore_dist=True,\n **kwargs,\n )\n # encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)\n\n \n if text is not None and images is not None:\n encoding[\"pixel_values\"] = images\n encoding[\"patch_positions\"] = patch_position\n return BatchEncoding(data=encoding)\n elif text is not None:\n return BatchEncoding(data=encoding)\n else:\n return BatchEncoding(data=dict(pixel_values=images, patch_position=patch_position), tensor_type=return_tensors)\n\n def batch_decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)\n\n def decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. 
Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)" }, { "identifier": "MplugOwlImageProcessor", "path": "mplug_owl/processing_mplug_owl.py", "snippet": "class MplugOwlImageProcessor(CLIPImageProcessor):\n pass" }, { "identifier": "MplugOwlForConditionalGeneration", "path": "mplug_owl/modeling_mplug_owl.py", "snippet": "class MplugOwlForConditionalGeneration(MplugOwlPreTrainedModel):\n config_class = MplugOwlConfig\n main_input_name = \"pixel_values\"\n\n def __init__(self, config: MplugOwlConfig):\n super().__init__(config)\n\n self.vision_model = MplugOwlVisionModel(config.vision_config)\n\n self.query_tokens = nn.Parameter(\n torch.zeros(1, config.num_query_tokens, config.visual_abstractor_config.hidden_size)\n )\n self.num_queries = config.num_query_tokens\n self.abstractor = MplugOwlVisualAbstractorModel(\n config.visual_abstractor_config, config.text_config.hidden_size\n )\n language_model = AutoModelForCausalLM.from_config(config.text_config)\n self.language_model = language_model\n\n # Initialize weights and apply final processing\n self.post_init()\n self.main_input_name = \"input_ids\"\n from transformers import GenerationConfig\n\n self.generation_config = GenerationConfig(\n max_length=512, do_sample=True, top_k=3, pad_token_id=0, unk_token_id=0, bos_token_id=1, eos_token_id=2\n )\n\n def get_input_embeddings(self):\n return self.language_model.get_input_embeddings()\n\n def set_input_embeddings(self, value):\n self.language_model.set_input_embeddings(value)\n\n def set_output_embeddings(self, new_embeddings):\n self.language_model.set_output_embeddings(new_embeddings)\n\n def get_output_embeddings(self) -> nn.Module:\n return self.language_model.get_output_embeddings()\n\n def get_encoder(self):\n return self.language_model.get_encoder()\n\n def get_decoder(self):\n return self.language_model.get_decoder()\n\n def _tie_weights(self):\n if not self.config.use_decoder_only_language_model:\n self.language_model.encoder.embed_tokens = self.language_model.shared\n self.language_model.decoder.embed_tokens = self.language_model.shared\n\n def _preprocess_accelerate(self):\n r\"\"\"\n Some pre-processing hacks to make the model `accelerate` compatible. Check\n https://github.com/huggingface/transformers/pull/21707 for more details.\n \"\"\"\n hf_device_map = self.hf_device_map\n\n if len(hf_device_map) > 1 and \"language_model\" not in hf_device_map and torch.cuda.device_count() > 1:\n # warn users about unexpected behavior when using multi-GPU + mPLUG-Owl + `accelerate`.\n logger.warning(\n \"The `language_model` is not in the `hf_device_map` dictionary and you are running your script\"\n \" in a multi-GPU environment. 
this may lead to unexpected behavior when using `accelerate`.\"\n \" Please pass a `device_map` that contains `language_model` to remove this warning.\"\n \" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for\"\n \" more details on creating a `device_map` for large models.\",\n )\n\n if hasattr(self.language_model, \"_hf_hook\"):\n self.language_model._hf_hook.io_same_device = True # For `generate` compatibility\n\n @add_start_docstrings_to_model_forward(MPLUG_OWL_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=MplugOwlForConditionalGenerationModelOutput, config_class=MplugOwlVisionConfig\n )\n def forward(\n self,\n pixel_values: torch.FloatTensor,\n input_ids: torch.FloatTensor,\n num_images,\n non_padding_mask: Optional[torch.LongTensor] = None,\n non_media_mask: Optional[torch.LongTensor] = None,\n prompt_mask: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n labels: Optional[torch.LongTensor] = None,\n patch_positions=None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MplugOwlForConditionalGenerationModelOutput]:\n r\"\"\"\n Returns:\n\n SFT example:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import MplugOwlProcessor, MplugOwlForConditionalGeneration\n >>> import torch\n\n >>> device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n >>> processor = MplugOwlProcessor.from_pretrained(\"MAGAer13/mplug-owl-llama-7b\")\n >>> model = MplugOwlForConditionalGeneration.from_pretrained(\n ... \"MAGAer13/mplug-owl-llama-7b\", torch_dtype=torch.float16\n ... )\n >>> model.to(device) # doctest: +IGNORE_RESULT\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> prompt = [\n ... \"The following is a conversation between a curious human and AI assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\\nHuman: <image>\\nHuman: how many cats are there?\\nAI: \"\n ... 
]\n >>> inputs = processor(images=[image], text=prompt, return_tensors=\"pt\").to(device, torch.float16)\n\n >>> generated_ids = model.generate(**inputs)\n >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()\n >>> print(generated_text)\n There are two cats in the image.\n ```\"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # get text embedding\n text_tokens_ = input_ids.clone()\n batch_size = input_ids.shape[0]\n # labels = text_tokens_[:, 1:].clone().contiguous()\n\n media_token_indices = [\n # [:-1] since we would not use the last token for embedding\n get_media_indices(text_tokens_[i][:-1], self.num_queries)\n for i in range(batch_size)\n ]\n text_tokens_[text_tokens_ < 0] = 1 # Not used\n # text_tokens = text_tokens_[:, :-1].contiguous()\n text_embeds = self.get_input_embeddings()(text_tokens_) # Temporally Embedding\n\n if pixel_values is not None:\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n\n image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n\n query_features = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n )[\"last_hidden_state\"]\n torch.ones(query_features.size()[:-1], dtype=torch.long).to(query_features.device)\n img_seq_length = query_features.shape[1]\n\n num_images_per_sample = num_images.long().cpu().tolist()\n\n text_chunk_embeds = []\n img_idx = 0\n for b in range(batch_size):\n start = 0\n result = []\n if len(media_token_indices[b]) > 0:\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(text_embeds[b, start:pos])\n result.append(query_features[img_idx + i])\n start = pos + img_seq_length\n if start < text_embeds.shape[1]:\n result.append(text_embeds[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n\n # Actual Input Embeddings\n input_embeds = torch.stack(text_chunk_embeds, dim=0)\n\n # Create causal mask and position ids\n _, loss_mask, position_ids = get_ltor_masks_and_position_ids_from_embeddings(input_embeds)\n\n # Calculate the loss_mask\n non_padding_mask = non_padding_mask.long()\n non_media_mask = non_media_mask.long()\n prompt_mask = prompt_mask.long() # TODO How to deal with prompt mask\n # from icecream import ic\n # non_padding_mask = non_padding_mask[:,:-1]\n # non_media_mask = non_media_mask[:,:-1]\n # prompt_mask = prompt_mask[:,:-1]\n # attention_mask = attention_mask[:,:-1]\n loss_mask = loss_mask[:, :-1]\n\n loss_mask = loss_mask * non_padding_mask * non_media_mask * prompt_mask\n labels[:, 1:][loss_mask != 1] = -100\n # Forward into GPT\n outputs = self.language_model(\n inputs_embeds=input_embeds,\n attention_mask=attention_mask,\n labels=labels,\n return_dict=return_dict,\n output_attentions=self.config.output_attentions,\n )\n outputs.loss = (outputs.loss * loss_mask.view(-1)\n ).sum()/loss_mask.sum()\n return outputs\n\n @torch.no_grad()\n def generate(\n self,\n pixel_values: torch.FloatTensor = None,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n patch_positions=None,\n 
isdecoder=True,\n **generate_kwargs,\n ) -> torch.LongTensor:\n \"\"\"\n Overrides `generate` function to be able to use the model as a conditional generator.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):\n Input images to be processed.\n input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n The sequence used as a prompt for the generation.\n attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n Mask to avoid performing attention on padding token indices\n\n Returns:\n captions (list): A list of strings of length batch_size * num_captions.\n \"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n if input_ids is None:\n return self.language_model.generate(attention_mask=attention_mask, **generate_kwargs)\n\n if attention_mask is None:\n attention_mask = input_ids.new_ones(*input_ids.shape)\n\n batch_size = input_ids.size(0)\n media_token_indices = [get_media_indices(input_ids[i], self.num_queries) for i in range(batch_size)]\n input_ids = input_ids.clone() # prevent inplace modify\n input_ids[input_ids < 0] = 0 # Not used\n\n if hasattr(self, \"hf_device_map\"):\n # preprocess for `accelerate`\n self._preprocess_accelerate()\n batch_size = input_ids.shape[0]\n # get text embedding\n inputs_embeds = self.get_input_embeddings()(input_ids)\n # get visual embedding\n if pixel_values is not None:\n pixel_values = pixel_values.to(input_ids.device)\n with torch.no_grad():\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n image_attention_mask = torch.ones(\n image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device\n )\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n query_outputs = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n return_dict=True,\n )\n query_output = query_outputs[\"last_hidden_state\"]\n image_embeds = query_output\n img_seq_length = image_embeds.shape[1]\n\n # ===================\n # Get actual input embeddings\n # ===================\n text_chunk_embeds = []\n text_chunk_attns = []\n img_idx = 0\n\n for b in range(batch_size):\n start = 0\n result = []\n result_attn = []\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(inputs_embeds[b, start:pos])\n result_attn.append(attention_mask[b, start:pos])\n result.append(image_embeds[img_idx + i])\n result_attn.append(torch.ones(image_embeds[img_idx + i].shape[0], device=inputs_embeds.device))\n start = pos + img_seq_length\n if start < inputs_embeds.shape[1]:\n result.append(inputs_embeds[b, start:])\n result_attn.append(attention_mask[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n text_chunk_attns.append(torch.cat(result_attn, dim=0))\n inputs_embeds = torch.stack(text_chunk_embeds, dim=0)\n attention_mask = torch.stack(text_chunk_attns, dim=0)\n\n outputs = self.language_model.generate(\n inputs_embeds=inputs_embeds,\n # input_ids=input_ids,\n attention_mask=attention_mask,\n **generate_kwargs,\n )\n\n return outputs\n\n def prepare_inputs_for_generation(\n self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, **model_kwargs\n ):\n input_shape = input_ids.shape\n # if model is used as a decoder in 
encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # # cut decoder_input_ids if past_key_values is used\n # if past_key_values is not None:\n # input_ids = input_ids[:, -1:]\n\n return {\n \"input_ids\": input_ids,\n \"pixel_values\": pixel_values,\n \"attention_mask\": attention_mask,\n \"is_decoder\": True,\n }" }, { "identifier": "MplugOwlConfig", "path": "mplug_owl/configuration_mplug_owl.py", "snippet": "class MplugOwlConfig(PretrainedConfig):\n r\"\"\"\n [`MplugOwlConfig`] is the configuration class to store the configuration of a [`MplugOwlForConditionalGeneration`].\n It is used to instantiate a mPLUG-Owl model according to the specified arguments, defining the vision model,\n Q-Former model and language model configs. Instantiating a configuration with the defaults will yield a similar\n configuration to that of the mPLUG-Owl [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b)\n architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`MplugOwlVisionConfig`].\n visual_abstractor_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`MplugOwlVisualAbstractorConfig`].\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize any [`PretrainedConfig`].\n num_query_tokens (`int`, *optional*, defaults to 32):\n The number of query tokens passed through the Transformer.\n\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\n Example:\n\n ```python\n >>> from transformers import (\n ... MplugOwlVisionConfig,\n ... MplugOwlVisualAbstractorConfig,\n ... OPTConfig,\n ... MplugOwlConfig,\n ... MplugOwlForConditionalGeneration,\n ... )\n\n >>> # Initializing a MplugOwlConfig with x-plug/x_plug-llama-7b style configuration\n >>> configuration = MplugOwlConfig()\n\n >>> # Initializing a MplugOwlForConditionalGeneration (with random weights) from the x-plug/x_plug-llama-7b style configuration\n >>> model = MplugOwlForConditionalGeneration(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n\n >>> # We can also initialize a MplugOwlConfig from a MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig and any PretrainedConfig\n\n >>> # Initializing mPLUG-Owl vision, mPLUG-Owl Q-Former and language model configurations\n >>> vision_config = MplugOwlVisionConfig()\n >>> visual_abstractor_config = MplugOwlVisualAbstractorConfig()\n >>> text_config = OPTConfig()\n\n >>> config = MplugOwlConfig.from_text_vision_configs(vision_config, visual_abstractor_config, text_config)\n ```\"\"\"\n model_type = \"mplug-owl\"\n is_composition = True\n\n def __init__(\n self, vision_config=None, visual_abstractor_config=None, text_config=None, num_query_tokens=64, **kwargs\n ):\n super().__init__(**kwargs)\n if vision_config is None:\n vision_config = MplugOwlVisionConfig().to_dict()\n logger.info(\"vision_config is None.\")\n\n if visual_abstractor_config is None:\n visual_abstractor_config = {}\n logger.info(\"abstractor_config is None. 
\")\n\n if text_config is None:\n # we use LLAMA 7b by default\n from transformers.llama.configuration_llama import LlamaConfig\n\n text_config = LlamaConfig(pad_token_id=2).to_dict()\n logger.info(\"text_config is None.\")\n\n self.vision_config = MplugOwlVisionConfig(**vision_config)\n self.visual_abstractor_config = MplugOwlVisualAbstractorConfig(**visual_abstractor_config)\n # self.visual_abstractor_config.layer_norm_eps = 1e-6\n text_model_type = text_config[\"model_type\"] if \"model_type\" in text_config else \"llama\"\n self.text_config = CONFIG_MAPPING[text_model_type](**text_config)\n\n self.tie_word_embeddings = self.text_config.tie_word_embeddings\n self.is_encoder_decoder = self.text_config.is_encoder_decoder\n\n self.num_query_tokens = num_query_tokens\n # self.visual_abstractor_config.encoder_hidden_size = self.vision_config.hidden_size\n self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES\n self.initializer_factor = 1.0\n self.initializer_range = 0.02\n\n for attr in dir(self.text_config):\n if not hasattr(self, attr):\n setattr(self, attr, getattr(self.text_config, attr))\n\n @classmethod\n def from_vision_visual_abstractor_text_configs(\n cls,\n vision_config: MplugOwlVisionConfig,\n visual_abstractor_config: MplugOwlVisualAbstractorConfig,\n text_config: PretrainedConfig,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a [`MplugOwlConfig`] (or a derived class) from a mPLUG-Owl vision model, Q-Former and language\n model configurations.\n\n Returns:\n [`MplugOwlConfig`]: An instance of a configuration object\n \"\"\"\n\n return cls(\n vision_config=vision_config.to_dict(),\n visual_abstractor_config=visual_abstractor_config.to_dict(),\n text_config=text_config.to_dict(),\n **kwargs,\n )\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. 
Override the default [`~PretrainedConfig.to_dict`].\n\n Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"visual_abstractor_config\"] = self.visual_abstractor_config.to_dict()\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output" }, { "identifier": "MplugOwlTokenizer", "path": "mplug_owl/tokenization_mplug_owl.py", "snippet": "class MplugOwlTokenizer(LlamaTokenizer):\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=\"<unk>\",\n sp_model_kwargs=None,\n add_bos_token=False,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n **kwargs,\n ):\n super().__init__(\n vocab_file,\n unk_token,\n bos_token,\n eos_token,\n pad_token,\n sp_model_kwargs,\n add_bos_token,\n add_eos_token,\n clean_up_tokenization_spaces,\n **kwargs,\n )\n self.eod_id = self.eos_token_id" }, { "identifier": "post_process_output", "path": "serve/model_utils.py", "snippet": "def post_process_output(text):\n text = text.strip()\n pattern = re.compile(\n r\"<unk>|<pad>|<s>|</s>|\\[PAD\\]|<\\|endoftext\\|>|\\[UNK\\]|\\[CLS\\]|\\[MASK\\]|<\\|startofpiece\\|>|<\\|endofpiece\\|>|\\[gMASK\\]|\\[sMASK\\]\"\n )\n text = pattern.sub(\"\", text.strip()).strip()\n return text" }, { "identifier": "Stream", "path": "serve/model_utils.py", "snippet": "class Stream(transformers.StoppingCriteria):\n def __init__(self, callback_func=None):\n self.callback_func = callback_func\n\n def __call__(self, input_ids, scores) -> bool:\n if self.callback_func is not None:\n self.callback_func(input_ids[0])\n return False" }, { "identifier": "Iteratorize", "path": "serve/model_utils.py", "snippet": "class Iteratorize:\n\n \"\"\"\n Transforms a function that takes a callback\n into a lazy iterator (generator).\n \"\"\"\n\n def __init__(self, func, kwargs={}, callback=None):\n self.mfunc = func\n self.c_callback = callback\n self.q = Queue()\n self.sentinel = object()\n self.kwargs = kwargs\n self.stop_now = False\n\n def _callback(val):\n if self.stop_now:\n raise ValueError\n self.q.put(val)\n\n def gentask():\n try:\n ret = self.mfunc(callback=_callback, **self.kwargs)\n except ValueError:\n pass\n except:\n traceback.print_exc()\n pass\n\n self.q.put(self.sentinel)\n if self.c_callback:\n self.c_callback(ret)\n\n self.thread = Thread(target=gentask)\n self.thread.start()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n obj = self.q.get(True, None)\n if obj is self.sentinel:\n raise StopIteration\n else:\n return obj\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop_now = True" }, { "identifier": "MplugOwlProcessor", "path": "mplug_owl/processing_mplug_owl.py", "snippet": "class MplugOwlProcessor(ProcessorMixin):\n attributes = []\n tokenizer_class = (\"MplugOwlTokenizer\")\n\n def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n super().__init__(**kwargs)\n self.tokens_to_generate = 0\n self.image_processor = image_processor\n self.tokenizer = tokenizer\n self.add_BOS = True\n\n def __call__(self, text=None, images=None, return_tensors=None, **kwargs):\n args = get_args()\n if text is None and images is None:\n raise ValueError(\"You have to specify either text or images. 
Both cannot be none.\")\n\n if images is not None:\n if not isinstance(images, list):\n images = [images]\n # image_features, = self.image_processor(images, return_tensors=return_tensors, **kwargs)\n process_results = [self.image_processor(image=image, text=None) for image in images]\n if len(process_results)>0 and len(process_results[0][0].shape) == 4:\n # 图片被切分成了多块 默认是doc场景\n text_list = text.split('<image>')\n images = []\n patch_positions = []\n text = text_list[0]\n for ri, (image_input, text_input, patch_position) in enumerate(process_results):\n images.append(image_input)\n patch_positions.append(patch_position)\n if args.patch_pos_embed_type == 'pre':\n # 对于pre处理 v2t最终输出的是一张图的token\n text += '<image>'\n else:\n # 对于post处理 v2t最终输出的是多图\n text += '<image>'*image_input.shape[0]\n text += text_list[ri+1]\n images = torch.cat(images, dim=0)\n patch_positions = torch.cat(patch_positions, dim=0)\n else:\n # 如果没有切片 则正常stack 并创建patch position = num_image (0,0)的patch id以保持一致\n images = [_[0] for _ in process_results]\n images = torch.stack(images, dim=0)\n patch_positions = torch.zeros(images.shape[0],2).long()\n text = text\n if text is not None:\n encoding = tokenize_prompts(\n prompts=[text],\n tokens_to_generate=self.tokens_to_generate,\n add_BOS=self.add_BOS,\n tokenizer=self.tokenizer,\n ignore_dist=True,\n **kwargs,\n )\n # encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)\n\n \n if text is not None and images is not None:\n encoding[\"pixel_values\"] = images\n encoding[\"patch_positions\"] = patch_position\n return BatchEncoding(data=encoding)\n elif text is not None:\n return BatchEncoding(data=encoding)\n else:\n return BatchEncoding(data=dict(pixel_values=images, patch_position=patch_position), tensor_type=return_tensors)\n\n def batch_decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)\n\n def decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. 
Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)" }, { "identifier": "MplugOwlForConditionalGeneration", "path": "mplug_owl/modeling_mplug_owl.py", "snippet": "class MplugOwlForConditionalGeneration(MplugOwlPreTrainedModel):\n config_class = MplugOwlConfig\n main_input_name = \"pixel_values\"\n\n def __init__(self, config: MplugOwlConfig):\n super().__init__(config)\n\n self.vision_model = MplugOwlVisionModel(config.vision_config)\n\n self.query_tokens = nn.Parameter(\n torch.zeros(1, config.num_query_tokens, config.visual_abstractor_config.hidden_size)\n )\n self.num_queries = config.num_query_tokens\n self.abstractor = MplugOwlVisualAbstractorModel(\n config.visual_abstractor_config, config.text_config.hidden_size\n )\n language_model = AutoModelForCausalLM.from_config(config.text_config)\n self.language_model = language_model\n\n # Initialize weights and apply final processing\n self.post_init()\n self.main_input_name = \"input_ids\"\n from transformers import GenerationConfig\n\n self.generation_config = GenerationConfig(\n max_length=512, do_sample=True, top_k=3, pad_token_id=0, unk_token_id=0, bos_token_id=1, eos_token_id=2\n )\n\n def get_input_embeddings(self):\n return self.language_model.get_input_embeddings()\n\n def set_input_embeddings(self, value):\n self.language_model.set_input_embeddings(value)\n\n def set_output_embeddings(self, new_embeddings):\n self.language_model.set_output_embeddings(new_embeddings)\n\n def get_output_embeddings(self) -> nn.Module:\n return self.language_model.get_output_embeddings()\n\n def get_encoder(self):\n return self.language_model.get_encoder()\n\n def get_decoder(self):\n return self.language_model.get_decoder()\n\n def _tie_weights(self):\n if not self.config.use_decoder_only_language_model:\n self.language_model.encoder.embed_tokens = self.language_model.shared\n self.language_model.decoder.embed_tokens = self.language_model.shared\n\n def _preprocess_accelerate(self):\n r\"\"\"\n Some pre-processing hacks to make the model `accelerate` compatible. Check\n https://github.com/huggingface/transformers/pull/21707 for more details.\n \"\"\"\n hf_device_map = self.hf_device_map\n\n if len(hf_device_map) > 1 and \"language_model\" not in hf_device_map and torch.cuda.device_count() > 1:\n # warn users about unexpected behavior when using multi-GPU + mPLUG-Owl + `accelerate`.\n logger.warning(\n \"The `language_model` is not in the `hf_device_map` dictionary and you are running your script\"\n \" in a multi-GPU environment. 
this may lead to unexpected behavior when using `accelerate`.\"\n \" Please pass a `device_map` that contains `language_model` to remove this warning.\"\n \" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for\"\n \" more details on creating a `device_map` for large models.\",\n )\n\n if hasattr(self.language_model, \"_hf_hook\"):\n self.language_model._hf_hook.io_same_device = True # For `generate` compatibility\n\n @add_start_docstrings_to_model_forward(MPLUG_OWL_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=MplugOwlForConditionalGenerationModelOutput, config_class=MplugOwlVisionConfig\n )\n def forward(\n self,\n pixel_values: torch.FloatTensor,\n input_ids: torch.FloatTensor,\n num_images,\n non_padding_mask: Optional[torch.LongTensor] = None,\n non_media_mask: Optional[torch.LongTensor] = None,\n prompt_mask: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n labels: Optional[torch.LongTensor] = None,\n patch_positions=None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MplugOwlForConditionalGenerationModelOutput]:\n r\"\"\"\n Returns:\n\n SFT example:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import MplugOwlProcessor, MplugOwlForConditionalGeneration\n >>> import torch\n\n >>> device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n >>> processor = MplugOwlProcessor.from_pretrained(\"MAGAer13/mplug-owl-llama-7b\")\n >>> model = MplugOwlForConditionalGeneration.from_pretrained(\n ... \"MAGAer13/mplug-owl-llama-7b\", torch_dtype=torch.float16\n ... )\n >>> model.to(device) # doctest: +IGNORE_RESULT\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> prompt = [\n ... \"The following is a conversation between a curious human and AI assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\\nHuman: <image>\\nHuman: how many cats are there?\\nAI: \"\n ... 
]\n >>> inputs = processor(images=[image], text=prompt, return_tensors=\"pt\").to(device, torch.float16)\n\n >>> generated_ids = model.generate(**inputs)\n >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()\n >>> print(generated_text)\n There are two cats in the image.\n ```\"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # get text embedding\n text_tokens_ = input_ids.clone()\n batch_size = input_ids.shape[0]\n # labels = text_tokens_[:, 1:].clone().contiguous()\n\n media_token_indices = [\n # [:-1] since we would not use the last token for embedding\n get_media_indices(text_tokens_[i][:-1], self.num_queries)\n for i in range(batch_size)\n ]\n text_tokens_[text_tokens_ < 0] = 1 # Not used\n # text_tokens = text_tokens_[:, :-1].contiguous()\n text_embeds = self.get_input_embeddings()(text_tokens_) # Temporally Embedding\n\n if pixel_values is not None:\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n\n image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n\n query_features = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n )[\"last_hidden_state\"]\n torch.ones(query_features.size()[:-1], dtype=torch.long).to(query_features.device)\n img_seq_length = query_features.shape[1]\n\n num_images_per_sample = num_images.long().cpu().tolist()\n\n text_chunk_embeds = []\n img_idx = 0\n for b in range(batch_size):\n start = 0\n result = []\n if len(media_token_indices[b]) > 0:\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(text_embeds[b, start:pos])\n result.append(query_features[img_idx + i])\n start = pos + img_seq_length\n if start < text_embeds.shape[1]:\n result.append(text_embeds[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n\n # Actual Input Embeddings\n input_embeds = torch.stack(text_chunk_embeds, dim=0)\n\n # Create causal mask and position ids\n _, loss_mask, position_ids = get_ltor_masks_and_position_ids_from_embeddings(input_embeds)\n\n # Calculate the loss_mask\n non_padding_mask = non_padding_mask.long()\n non_media_mask = non_media_mask.long()\n prompt_mask = prompt_mask.long() # TODO How to deal with prompt mask\n # from icecream import ic\n # non_padding_mask = non_padding_mask[:,:-1]\n # non_media_mask = non_media_mask[:,:-1]\n # prompt_mask = prompt_mask[:,:-1]\n # attention_mask = attention_mask[:,:-1]\n loss_mask = loss_mask[:, :-1]\n\n loss_mask = loss_mask * non_padding_mask * non_media_mask * prompt_mask\n labels[:, 1:][loss_mask != 1] = -100\n # Forward into GPT\n outputs = self.language_model(\n inputs_embeds=input_embeds,\n attention_mask=attention_mask,\n labels=labels,\n return_dict=return_dict,\n output_attentions=self.config.output_attentions,\n )\n outputs.loss = (outputs.loss * loss_mask.view(-1)\n ).sum()/loss_mask.sum()\n return outputs\n\n @torch.no_grad()\n def generate(\n self,\n pixel_values: torch.FloatTensor = None,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n patch_positions=None,\n 
isdecoder=True,\n **generate_kwargs,\n ) -> torch.LongTensor:\n \"\"\"\n Overrides `generate` function to be able to use the model as a conditional generator.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):\n Input images to be processed.\n input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n The sequence used as a prompt for the generation.\n attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n Mask to avoid performing attention on padding token indices\n\n Returns:\n captions (list): A list of strings of length batch_size * num_captions.\n \"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n if input_ids is None:\n return self.language_model.generate(attention_mask=attention_mask, **generate_kwargs)\n\n if attention_mask is None:\n attention_mask = input_ids.new_ones(*input_ids.shape)\n\n batch_size = input_ids.size(0)\n media_token_indices = [get_media_indices(input_ids[i], self.num_queries) for i in range(batch_size)]\n input_ids = input_ids.clone() # prevent inplace modify\n input_ids[input_ids < 0] = 0 # Not used\n\n if hasattr(self, \"hf_device_map\"):\n # preprocess for `accelerate`\n self._preprocess_accelerate()\n batch_size = input_ids.shape[0]\n # get text embedding\n inputs_embeds = self.get_input_embeddings()(input_ids)\n # get visual embedding\n if pixel_values is not None:\n pixel_values = pixel_values.to(input_ids.device)\n with torch.no_grad():\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n image_attention_mask = torch.ones(\n image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device\n )\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n query_outputs = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n return_dict=True,\n )\n query_output = query_outputs[\"last_hidden_state\"]\n image_embeds = query_output\n img_seq_length = image_embeds.shape[1]\n\n # ===================\n # Get actual input embeddings\n # ===================\n text_chunk_embeds = []\n text_chunk_attns = []\n img_idx = 0\n\n for b in range(batch_size):\n start = 0\n result = []\n result_attn = []\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(inputs_embeds[b, start:pos])\n result_attn.append(attention_mask[b, start:pos])\n result.append(image_embeds[img_idx + i])\n result_attn.append(torch.ones(image_embeds[img_idx + i].shape[0], device=inputs_embeds.device))\n start = pos + img_seq_length\n if start < inputs_embeds.shape[1]:\n result.append(inputs_embeds[b, start:])\n result_attn.append(attention_mask[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n text_chunk_attns.append(torch.cat(result_attn, dim=0))\n inputs_embeds = torch.stack(text_chunk_embeds, dim=0)\n attention_mask = torch.stack(text_chunk_attns, dim=0)\n\n outputs = self.language_model.generate(\n inputs_embeds=inputs_embeds,\n # input_ids=input_ids,\n attention_mask=attention_mask,\n **generate_kwargs,\n )\n\n return outputs\n\n def prepare_inputs_for_generation(\n self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, **model_kwargs\n ):\n input_shape = input_ids.shape\n # if model is used as a decoder in 
encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # # cut decoder_input_ids if past_key_values is used\n # if past_key_values is not None:\n # input_ids = input_ids[:, -1:]\n\n return {\n \"input_ids\": input_ids,\n \"pixel_values\": pixel_values,\n \"attention_mask\": attention_mask,\n \"is_decoder\": True,\n }" }, { "identifier": "build_processors", "path": "pipeline/data_utils/processors/builder.py", "snippet": "def build_processors(processors_cfg):\n processors = dict()\n for task, processor in processors_cfg.items():\n processors[task] = build_from_cfg(processor, PROCESSORS)\n ic(type(processors[task]))\n return processors" } ]
from PIL import Image from io import BytesIO from .io_utils import IO, DefaultIO, OSS from mplug_owl.processing_mplug_owl import MplugOwlProcessor, MplugOwlImageProcessor from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration from mplug_owl.configuration_mplug_owl import MplugOwlConfig from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer from transformers import GenerationConfig from .model_utils import post_process_output, Stream, Iteratorize from pathlib import Path from mplug_owl.processing_mplug_owl import MplugOwlProcessor from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration from pipeline.data_utils.processors.builder import build_processors from pipeline.data_utils.processors import * from transformers.models.llama.tokenization_llama import LlamaTokenizer from icecream import ic import torch import gradio as gr import logging import sys import os import json import requests import datetime import uuid import base64 import time import sys import transformers
14,657
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config self.image_processor = build_processors(config['valid_processors'])['sft'] self.tokenizer = LlamaTokenizer.from_pretrained(base_model)
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config self.image_processor = build_processors(config['valid_processors'])['sft'] self.tokenizer = LlamaTokenizer.from_pretrained(base_model)
self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer)
11
2023-10-08 06:29:02+00:00
24k
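The record above is one complete sample of this repo-level next-line completion data: retrieved cross-file `context` snippets, the target file's `import_statement`, the `cropped_code` prefix, the gold `next_line`, and bookkeeping fields (`token_num`, `gold_snippet_index`, which appears to index into `context`, `created_at`, `level`). As a reading aid only, below is a minimal Python sketch of one plausible way to turn such a row into a prompt and score a model's completion; `build_prompt`, `next_line_exact_match`, `complete_fn`, and the toy row are illustrative assumptions, not the dataset authors' own prompt format or evaluation code.

# Minimal sketch (assumed consumer code, not part of the dataset) of how one row
# of this schema could be assembled into a next-line completion example.
from typing import Callable, Dict


def build_prompt(row: Dict) -> str:
    # One plausible prompt layout: retrieved snippets, then imports, then the code prefix.
    context_block = "\n\n".join(
        f"# {c['path']}\n{c['snippet']}" for c in row.get("context", [])
    )
    return f"{context_block}\n\n{row['import_statement']}\n{row['cropped_code']}\n"


def next_line_exact_match(row: Dict, complete_fn: Callable[[str], str]) -> bool:
    # Compare the first non-empty completed line against the gold `next_line`.
    lines = [ln for ln in complete_fn(build_prompt(row)).splitlines() if ln.strip()]
    prediction = lines[0].strip() if lines else ""
    return prediction == row["next_line"].strip()


if __name__ == "__main__":
    toy_row = {  # toy placeholder values for illustration only; not taken from the dataset
        "context": [{"identifier": "helper", "path": "pkg/util.py",
                     "snippet": "def helper():\n    return 1"}],
        "import_statement": "from pkg.util import helper",
        "cropped_code": "def run():",
        "next_line": "    return helper()",
    }
    print(next_line_exact_match(toy_row, lambda prompt: "    return helper()"))  # True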
LeapLabTHU/Rank-DETR
projects/dino/configs/models/dino_r50.py
[ { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n\n Args:\n cost_class (float): The relative weight of the classification error\n in the matching cost. Default: 1.\n cost_bbox (float): The relative weight of the L1 error of the bounding box\n coordinates in the matching cost. Default: 1.\n cost_giou (float): This is the relative weight of the giou loss of\n the bounding box in the matching cost. Default: 1.\n cost_class_type (str): How the classification error is calculated.\n Choose from ``[\"ce_cost\", \"focal_loss_cost\"]``. Default: \"focal_loss_cost\".\n alpha (float): Weighting factor in range (0, 1) to balance positive vs\n negative examples in focal loss. Default: 0.25.\n gamma (float): Exponent of modulating factor (1 - p_t) to balance easy vs\n hard examples in focal loss. Default: 2.\n \"\"\"\n\n def __init__(\n self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1,\n cost_class_type: str = \"focal_loss_cost\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n ):\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.cost_class_type = cost_class_type\n self.alpha = alpha\n self.gamma = gamma\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n assert cost_class_type in {\n \"ce_cost\",\n \"focal_loss_cost\",\n }, \"only support ce loss or focal loss for computing class cost\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Forward function for `HungarianMatcher` which performs the matching.\n\n Args:\n outputs (Dict[str, torch.Tensor]): This is a dict that contains at least these entries:\n\n - ``\"pred_logits\"``: Tensor of shape (bs, num_queries, num_classes) with the classification logits.\n - ``\"pred_boxes\"``: Tensor of shape (bs, num_queries, 4) with the predicted box coordinates.\n\n targets (List[Dict[str, torch.Tensor]]): This is a list of targets (len(targets) = batch_size),\n where each target is a dict containing:\n\n - ``\"labels\"``: Tensor of shape (num_target_boxes, ) (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. 
# noqa\n - ``\"boxes\"``: Tensor of shape (num_target_boxes, 4) containing the target box coordinates.\n\n Returns:\n list[torch.Tensor]: A list of size batch_size, containing tuples of `(index_i, index_j)` where:\n\n - ``index_i`` is the indices of the selected predictions (in order)\n - ``index_j`` is the indices of the corresponding selected targets (in order)\n\n For each batch element, it holds: `len(index_i) = len(index_j) = min(num_queries, num_target_boxes)`\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n if self.cost_class_type == \"ce_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).softmax(-1)\n ) # [batch_size * num_queries, num_classes]\n elif self.cost_class_type == \"focal_loss_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n ) # [batch_size * num_queries, num_classes]\n\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost.\n if self.cost_class_type == \"ce_cost\":\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n elif self.cost_class_type == \"focal_loss_cost\":\n alpha = self.alpha\n gamma = self.gamma\n neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_bbox: {}\".format(self.cost_bbox),\n \"cost_giou: {}\".format(self.cost_giou),\n \"cost_class_type: {}\".format(self.cost_class_type),\n \"focal cost alpha: {}\".format(self.alpha),\n \"focal cost gamma: {}\".format(self.gamma),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "ChannelMapper", "path": "detrex/modeling/neck/channel_mapper.py", "snippet": "class ChannelMapper(nn.Module):\n \"\"\"Channel Mapper for reduce/increase channels of backbone features. Modified\n from `mmdet <https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/channel_mapper.py>`_.\n\n This is used to reduce/increase the channels of backbone features.\n\n Args:\n input_shape (Dict[str, ShapeSpec]): A dict which contains the backbone features meta infomation,\n e.g. 
``input_shape = {\"res5\": ShapeSpec(channels=2048)}``.\n in_features (List[str]): A list contains the keys which maps the features output from the backbone,\n e.g. ``in_features = [\"res\"]``.\n out_channels (int): Number of output channels for each scale.\n kernel_size (int, optional): Size of the convolving kernel for each scale.\n Default: 3.\n stride (int, optional): Stride of convolution for each scale. Default: 1.\n bias (bool, optional): If True, adds a learnable bias to the output of each scale.\n Default: True.\n groups (int, optional): Number of blocked connections from input channels to\n output channels for each scale. Default: 1.\n dilation (int, optional): Spacing between kernel elements for each scale.\n Default: 1.\n norm_layer (nn.Module, optional): The norm layer used for each scale. Default: None.\n activation (nn.Module, optional): The activation layer used for each scale. Default: None.\n num_outs (int, optional): Number of output feature maps. There will be ``extra_convs`` when\n ``num_outs`` is larger than the length of ``in_features``. Default: None.\n\n Examples:\n >>> import torch\n >>> import torch.nn as nn\n >>> from detrex.modeling import ChannelMapper\n >>> from detectron2.modeling import ShapeSpec\n >>> input_features = {\n ... \"p0\": torch.randn(1, 128, 128, 128),\n ... \"p1\": torch.randn(1, 256, 64, 64),\n ... \"p2\": torch.randn(1, 512, 32, 32),\n ... \"p3\": torch.randn(1, 1024, 16, 16),\n ... }\n >>> input_shapes = {\n ... \"p0\": ShapeSpec(channels=128),\n ... \"p1\": ShapeSpec(channels=256),\n ... \"p2\": ShapeSpec(channels=512),\n ... \"p3\": ShapeSpec(channels=1024),\n ... }\n >>> in_features = [\"p0\", \"p1\", \"p2\", \"p3\"]\n >>> neck = ChannelMapper(\n ... input_shapes=input_shapes,\n ... in_features=in_features,\n ... out_channels=256,\n ... norm_layer=nn.GroupNorm(num_groups=32, num_channels=256)\n >>> outputs = neck(input_features)\n >>> for i in range(len(outputs)):\n ... 
print(f\"output[{i}].shape = {outputs[i].shape}\")\n output[0].shape = torch.Size([1, 256, 128, 128])\n output[1].shape = torch.Size([1, 256, 64, 64])\n output[2].shape = torch.Size([1, 256, 32, 32])\n output[3].shape = torch.Size([1, 256, 16, 16])\n \"\"\"\n\n def __init__(\n self,\n input_shapes: Dict[str, ShapeSpec],\n in_features: List[str],\n out_channels: int,\n kernel_size: int = 3,\n stride: int = 1,\n bias: bool = True,\n groups: int = 1,\n dilation: int = 1,\n norm_layer: nn.Module = None,\n activation: nn.Module = None,\n num_outs: int = None,\n **kwargs,\n ):\n super(ChannelMapper, self).__init__()\n self.extra_convs = None\n\n in_channels_per_feature = [input_shapes[f].channels for f in in_features]\n\n if num_outs is None:\n num_outs = len(input_shapes)\n\n self.convs = nn.ModuleList()\n for in_channel in in_channels_per_feature:\n self.convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=(kernel_size - 1) // 2,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n if num_outs > len(in_channels_per_feature):\n self.extra_convs = nn.ModuleList()\n for i in range(len(in_channels_per_feature), num_outs):\n if i == len(in_channels_per_feature):\n in_channel = in_channels_per_feature[-1]\n else:\n in_channel = out_channels\n self.extra_convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n self.input_shapes = input_shapes\n self.in_features = in_features\n self.out_channels = out_channels\n\n def forward(self, inputs):\n \"\"\"Forward function for ChannelMapper\n\n Args:\n inputs (Dict[str, torch.Tensor]): The backbone feature maps.\n\n Return:\n tuple(torch.Tensor): A tuple of the processed features.\n \"\"\"\n assert len(inputs) == len(self.convs)\n outs = [self.convs[i](inputs[self.in_features[i]]) for i in range(len(inputs))]\n if self.extra_convs:\n for i in range(len(self.extra_convs)):\n if i == 0:\n outs.append(self.extra_convs[0](inputs[self.in_features[-1]]))\n else:\n outs.append(self.extra_convs[i](outs[-1]))\n return tuple(outs)" }, { "identifier": "PositionEmbeddingSine", "path": "detrex/layers/position_embedding.py", "snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"Sinusoidal position embedding used in DETR model.\n\n Please see `End-to-End Object Detection with Transformers\n <https://arxiv.org/pdf/2005.12872>`_ for more details.\n\n Args:\n num_pos_feats (int): The feature dimension for each position along\n x-axis or y-axis. The final returned dimension for each position\n is 2 times of the input value.\n temperature (int, optional): The temperature used for scaling\n the position embedding. Default: 10000.\n scale (float, optional): A scale factor that scales the position\n embedding. The scale will be used only when `normalize` is True.\n Default: 2*pi.\n eps (float, optional): A value added to the denominator for numerical\n stability. 
Default: 1e-6.\n offset (float): An offset added to embed when doing normalization.\n normalize (bool, optional): Whether to normalize the position embedding.\n Default: False.\n \"\"\"\n\n def __init__(\n self,\n num_pos_feats: int = 64,\n temperature: int = 10000,\n scale: float = 2 * math.pi,\n eps: float = 1e-6,\n offset: float = 0.0,\n normalize: bool = False,\n ):\n super().__init__()\n if normalize:\n assert isinstance(scale, (float, int)), (\n \"when normalize is set,\"\n \"scale should be provided and in float or int type, \"\n f\"found {type(scale)}\"\n )\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n self.scale = scale\n self.eps = eps\n self.offset = offset\n\n def forward(self, mask: torch.Tensor, **kwargs) -> torch.Tensor:\n \"\"\"Forward function for `PositionEmbeddingSine`.\n\n Args:\n mask (torch.Tensor): ByteTensor mask. Non-zero values representing\n ignored positions, while zero values means valid positions\n for the input tensor. Shape as `(bs, h, w)`.\n\n Returns:\n torch.Tensor: Returned position embedding with\n shape `(bs, num_pos_feats * 2, h, w)`\n \"\"\"\n assert mask is not None\n not_mask = ~mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if self.normalize:\n y_embed = (y_embed + self.offset) / (y_embed[:, -1:, :] + self.eps) * self.scale\n x_embed = (x_embed + self.offset) / (x_embed[:, :, -1:] + self.eps) * self.scale\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=mask.device)\n dim_t = self.temperature ** (\n 2 * torch.div(dim_t, 2, rounding_mode=\"floor\") / self.num_pos_feats\n )\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n\n # use view as mmdet instead of flatten for dynamically exporting to ONNX\n B, H, W = mask.size()\n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos" }, { "identifier": "DINOTransformerEncoder", "path": "projects/dino/modeling/dino_transformer.py", "snippet": "class DINOTransformerEncoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n post_norm: bool = False,\n num_feature_levels: int = 4,\n use_checkpoint: bool = False,\n ):\n super(DINOTransformerEncoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n num_fcs=2,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=(\"self_attn\", \"norm\", \"ffn\", \"norm\"),\n ),\n num_layers=num_layers,\n )\n self.embed_dim = self.layers[0].embed_dim\n self.pre_norm = self.layers[0].pre_norm\n\n if post_norm:\n self.post_norm_layer = nn.LayerNorm(self.embed_dim)\n else:\n self.post_norm_layer = None\n\n # use encoder checkpoint\n if use_checkpoint:\n for layer in self.layers:\n layer = checkpoint_wrapper(layer)\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n 
query_key_padding_mask=None,\n key_padding_mask=None,\n **kwargs,\n ):\n\n for layer in self.layers:\n query = layer(\n query,\n key,\n value,\n query_pos=query_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n **kwargs,\n )\n\n if self.post_norm_layer is not None:\n query = self.post_norm_layer(query)\n return query" }, { "identifier": "DINOTransformerDecoder", "path": "projects/dino/modeling/dino_transformer.py", "snippet": "class DINOTransformerDecoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n return_intermediate: bool = True,\n num_feature_levels: int = 4,\n look_forward_twice: bool = True,\n use_checkpoint: bool = True,\n ):\n super(DINOTransformerDecoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=[\n MultiheadAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n attn_drop=attn_dropout,\n batch_first=True,\n ),\n MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ],\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=(\"self_attn\", \"norm\", \"cross_attn\", \"norm\", \"ffn\", \"norm\"),\n ),\n num_layers=num_layers,\n )\n self.return_intermediate = return_intermediate\n\n self.ref_point_head = MLP(2 * embed_dim, embed_dim, embed_dim, 2)\n\n self.bbox_embed = None\n self.class_embed = None\n self.look_forward_twice = look_forward_twice\n self.norm = nn.LayerNorm(embed_dim)\n\n # decoder checkpoint\n if use_checkpoint:\n for layer in self.layers:\n layer = checkpoint_wrapper(layer)\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n reference_points=None, # num_queries, 4. 
normalized.\n valid_ratios=None,\n **kwargs,\n ):\n output = query\n bs, num_queries, _ = output.size()\n if reference_points.dim() == 2:\n reference_points = reference_points.unsqueeze(0).repeat(bs, 1, 1) # bs, num_queries, 4\n\n intermediate = []\n intermediate_reference_points = []\n for layer_idx, layer in enumerate(self.layers):\n if reference_points.shape[-1] == 4:\n reference_points_input = (\n reference_points[:, :, None]\n * torch.cat([valid_ratios, valid_ratios], -1)[:, None]\n )\n else:\n assert reference_points.shape[-1] == 2\n reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]\n\n query_sine_embed = get_sine_pos_embed(reference_points_input[:, :, 0, :])\n query_pos = self.ref_point_head(query_sine_embed)\n\n output = layer(\n output,\n key,\n value,\n query_pos=query_pos,\n key_pos=key_pos,\n query_sine_embed=query_sine_embed,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n reference_points=reference_points_input,\n **kwargs,\n )\n\n if self.bbox_embed is not None:\n tmp = self.bbox_embed[layer_idx](output)\n if reference_points.shape[-1] == 4:\n new_reference_points = tmp + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n else:\n assert reference_points.shape[-1] == 2\n new_reference_points = tmp\n new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n reference_points = new_reference_points.detach()\n\n if self.return_intermediate:\n intermediate.append(self.norm(output))\n if self.look_forward_twice:\n intermediate_reference_points.append(new_reference_points)\n else:\n intermediate_reference_points.append(reference_points)\n\n if self.return_intermediate:\n return torch.stack(intermediate), torch.stack(intermediate_reference_points)\n\n return output, reference_points" }, { "identifier": "DINOTransformer", "path": "projects/dino/modeling/dino_transformer.py", "snippet": "class DINOTransformer(nn.Module):\n \"\"\"Transformer module for DINO\n\n Args:\n encoder (nn.Module): encoder module.\n decoder (nn.Module): decoder module.\n as_two_stage (bool): whether to use two-stage transformer. Default False.\n num_feature_levels (int): number of feature levels. Default 4.\n two_stage_num_proposals (int): number of proposals in two-stage transformer. 
Default 900.\n \"\"\"\n\n def __init__(\n self,\n encoder=None,\n decoder=None,\n num_feature_levels=4,\n two_stage_num_proposals=900,\n learnt_init_query=True,\n ):\n super(DINOTransformer, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.num_feature_levels = num_feature_levels\n self.two_stage_num_proposals = two_stage_num_proposals\n\n self.embed_dim = self.encoder.embed_dim\n\n self.level_embeds = nn.Parameter(torch.Tensor(self.num_feature_levels, self.embed_dim))\n self.learnt_init_query = learnt_init_query\n if self.learnt_init_query:\n self.tgt_embed = nn.Embedding(self.two_stage_num_proposals, self.embed_dim)\n self.enc_output = nn.Linear(self.embed_dim, self.embed_dim)\n self.enc_output_norm = nn.LayerNorm(self.embed_dim)\n\n self.init_weights()\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n for m in self.modules():\n if isinstance(m, MultiScaleDeformableAttention):\n m.init_weights()\n nn.init.normal_(self.level_embeds)\n\n def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):\n N, S, C = memory.shape\n proposals = []\n _cur = 0\n for lvl, (H, W) in enumerate(spatial_shapes):\n mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H * W)].view(N, H, W, 1)\n valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n grid_y, grid_x = torch.meshgrid(\n torch.linspace(0, H - 1, H, dtype=torch.float32, device=memory.device),\n torch.linspace(0, W - 1, W, dtype=torch.float32, device=memory.device),\n )\n grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale\n wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)\n proposal = torch.cat((grid, wh), -1).view(N, -1, 4)\n proposals.append(proposal)\n _cur += H * W\n\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(\n -1, keepdim=True\n )\n output_proposals = torch.log(output_proposals / (1 - output_proposals))\n output_proposals = output_proposals.masked_fill(\n memory_padding_mask.unsqueeze(-1), float(\"inf\")\n )\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float(\"inf\"))\n\n output_memory = memory\n output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n output_memory = self.enc_output_norm(self.enc_output(output_memory))\n return output_memory, output_proposals\n\n @staticmethod\n def get_reference_points(spatial_shapes, valid_ratios, device):\n \"\"\"Get the reference points used in decoder.\n\n Args:\n spatial_shapes (Tensor): The shape of all\n feature maps, has shape (num_level, 2).\n valid_ratios (Tensor): The ratios of valid\n points on the feature map, has shape\n (bs, num_levels, 2)\n device (obj:`device`): The device where\n reference_points should be.\n\n Returns:\n Tensor: reference points used in decoder, has \\\n shape (bs, num_keys, num_levels, 2).\n \"\"\"\n reference_points_list = []\n for lvl, (H, W) in enumerate(spatial_shapes):\n # TODO check this 0.5\n ref_y, ref_x = torch.meshgrid(\n torch.linspace(0.5, H - 0.5, H, dtype=torch.float32, device=device),\n torch.linspace(0.5, W - 0.5, W, dtype=torch.float32, device=device),\n )\n ref_y = ref_y.reshape(-1)[None] / 
(valid_ratios[:, None, lvl, 1] * H)\n ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W)\n ref = torch.stack((ref_x, ref_y), -1)\n reference_points_list.append(ref)\n reference_points = torch.cat(reference_points_list, 1)\n reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n return reference_points\n\n def get_valid_ratio(self, mask):\n \"\"\"Get the valid ratios of feature maps of all levels.\"\"\"\n _, H, W = mask.shape\n valid_H = torch.sum(~mask[:, :, 0], 1)\n valid_W = torch.sum(~mask[:, 0, :], 1)\n valid_ratio_h = valid_H.float() / H\n valid_ratio_w = valid_W.float() / W\n valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n return valid_ratio\n\n def forward(\n self,\n multi_level_feats,\n multi_level_masks,\n multi_level_pos_embeds,\n query_embed,\n attn_masks,\n **kwargs,\n ):\n feat_flatten = []\n mask_flatten = []\n lvl_pos_embed_flatten = []\n spatial_shapes = []\n for lvl, (feat, mask, pos_embed) in enumerate(\n zip(multi_level_feats, multi_level_masks, multi_level_pos_embeds)\n ):\n bs, c, h, w = feat.shape\n spatial_shape = (h, w)\n spatial_shapes.append(spatial_shape)\n\n feat = feat.flatten(2).transpose(1, 2) # bs, hw, c\n mask = mask.flatten(1)\n pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c\n lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1)\n lvl_pos_embed_flatten.append(lvl_pos_embed)\n feat_flatten.append(feat)\n mask_flatten.append(mask)\n feat_flatten = torch.cat(feat_flatten, 1)\n mask_flatten = torch.cat(mask_flatten, 1)\n lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n spatial_shapes = torch.as_tensor(\n spatial_shapes, dtype=torch.long, device=feat_flatten.device\n )\n level_start_index = torch.cat(\n (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])\n )\n valid_ratios = torch.stack([self.get_valid_ratio(m) for m in multi_level_masks], 1)\n\n reference_points = self.get_reference_points(\n spatial_shapes, valid_ratios, device=feat.device\n )\n\n memory = self.encoder(\n query=feat_flatten,\n key=None,\n value=None,\n query_pos=lvl_pos_embed_flatten,\n query_key_padding_mask=mask_flatten,\n spatial_shapes=spatial_shapes,\n reference_points=reference_points, # bs, num_token, num_level, 2\n level_start_index=level_start_index,\n valid_ratios=valid_ratios,\n **kwargs,\n )\n\n output_memory, output_proposals = self.gen_encoder_output_proposals(\n memory, mask_flatten, spatial_shapes\n )\n # output_memory: bs, num_tokens, c\n # output_proposals: bs, num_tokens, 4. 
unsigmoided.\n\n enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)\n enc_outputs_coord_unact = (\n self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals\n ) # unsigmoided.\n\n topk = self.two_stage_num_proposals\n topk_proposals = torch.topk(enc_outputs_class.max(-1)[0], topk, dim=1)[1]\n\n # extract region proposal boxes\n topk_coords_unact = torch.gather(\n enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)\n ) # unsigmoided.\n reference_points = topk_coords_unact.detach().sigmoid()\n if query_embed[1] is not None:\n reference_points = torch.cat([query_embed[1].sigmoid(), reference_points], 1)\n init_reference_out = reference_points\n\n # extract region features\n target_unact = torch.gather(\n output_memory, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, output_memory.shape[-1])\n )\n if self.learnt_init_query:\n target = self.tgt_embed.weight[None].repeat(bs, 1, 1)\n else:\n target = target_unact.detach()\n if query_embed[0] is not None:\n target = torch.cat([query_embed[0], target], 1)\n\n # decoder\n inter_states, inter_references = self.decoder(\n query=target, # bs, num_queries, embed_dims\n key=memory, # bs, num_tokens, embed_dims\n value=memory, # bs, num_tokens, embed_dims\n query_pos=None,\n key_padding_mask=mask_flatten, # bs, num_tokens\n reference_points=reference_points, # num_queries, 4\n spatial_shapes=spatial_shapes, # nlvl, 2\n level_start_index=level_start_index, # nlvl\n valid_ratios=valid_ratios, # bs, nlvl, 2\n attn_masks=attn_masks,\n **kwargs,\n )\n\n inter_references_out = inter_references\n return (\n inter_states,\n init_reference_out,\n inter_references_out,\n target_unact,\n topk_coords_unact.sigmoid(),\n )" }, { "identifier": "DINO", "path": "projects/dino/modeling/dino.py", "snippet": "class DINO(nn.Module):\n \"\"\"Implement DAB-Deformable-DETR in `DAB-DETR: Dynamic Anchor Boxes are Better Queries for DETR\n <https://arxiv.org/abs/2203.03605>`_.\n\n Code is modified from the `official github repo\n <https://github.com/IDEA-Research/DINO>`_.\n\n Args:\n backbone (nn.Module): backbone module\n position_embedding (nn.Module): position embedding module\n neck (nn.Module): neck module to handle the intermediate outputs features\n transformer (nn.Module): transformer module\n embed_dim (int): dimension of embedding\n num_classes (int): Number of total categories.\n num_queries (int): Number of proposal dynamic anchor boxes in Transformer\n criterion (nn.Module): Criterion for calculating the total losses.\n pixel_mean (List[float]): Pixel mean value for image normalization.\n Default: [123.675, 116.280, 103.530].\n pixel_std (List[float]): Pixel std value for image normalization.\n Default: [58.395, 57.120, 57.375].\n aux_loss (bool): Whether to calculate auxiliary loss in criterion. Default: True.\n select_box_nums_for_evaluation (int): the number of topk candidates\n slected at postprocess for evaluation. Default: 300.\n device (str): Training device. 
Default: \"cuda\".\n \"\"\"\n\n def __init__(\n self,\n backbone: nn.Module,\n position_embedding: nn.Module,\n neck: nn.Module,\n transformer: nn.Module,\n embed_dim: int,\n num_classes: int,\n num_queries: int,\n criterion: nn.Module,\n pixel_mean: List[float] = [123.675, 116.280, 103.530],\n pixel_std: List[float] = [58.395, 57.120, 57.375],\n aux_loss: bool = True,\n select_box_nums_for_evaluation: int = 300,\n device=\"cuda\",\n dn_number: int = 100,\n label_noise_ratio: float = 0.2,\n box_noise_scale: float = 1.0,\n input_format: Optional[str] = \"RGB\",\n vis_period: int = 0,\n ):\n super().__init__()\n # define backbone and position embedding module\n self.backbone = backbone\n self.position_embedding = position_embedding\n\n # define neck module\n self.neck = neck\n\n # number of dynamic anchor boxes and embedding dimension\n self.num_queries = num_queries\n self.embed_dim = embed_dim\n\n # define transformer module\n self.transformer = transformer\n\n # define classification head and box head\n self.class_embed = nn.Linear(embed_dim, num_classes)\n self.bbox_embed = MLP(embed_dim, embed_dim, 4, 3)\n self.num_classes = num_classes\n\n # where to calculate auxiliary loss in criterion\n self.aux_loss = aux_loss\n self.criterion = criterion\n\n # denoising\n self.label_enc = nn.Embedding(num_classes, embed_dim)\n self.dn_number = dn_number\n self.label_noise_ratio = label_noise_ratio\n self.box_noise_scale = box_noise_scale\n\n # normalizer for input raw images\n self.device = device\n pixel_mean = torch.Tensor(pixel_mean).to(self.device).view(3, 1, 1)\n pixel_std = torch.Tensor(pixel_std).to(self.device).view(3, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n\n # initialize weights\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n self.class_embed.bias.data = torch.ones(num_classes) * bias_value\n nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n for _, neck_layer in self.neck.named_modules():\n if isinstance(neck_layer, nn.Conv2d):\n nn.init.xavier_uniform_(neck_layer.weight, gain=1)\n nn.init.constant_(neck_layer.bias, 0)\n\n # if two-stage, the last class_embed and bbox_embed is for region proposal generation\n num_pred = transformer.decoder.num_layers + 1\n self.class_embed = nn.ModuleList([copy.deepcopy(self.class_embed) for i in range(num_pred)])\n self.bbox_embed = nn.ModuleList([copy.deepcopy(self.bbox_embed) for i in range(num_pred)])\n nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)\n\n # two-stage\n self.transformer.decoder.class_embed = self.class_embed\n self.transformer.decoder.bbox_embed = self.bbox_embed\n\n # hack implementation for two-stage\n for bbox_embed_layer in self.bbox_embed:\n nn.init.constant_(bbox_embed_layer.layers[-1].bias.data[2:], 0.0)\n\n # set topk boxes selected for inference\n self.select_box_nums_for_evaluation = select_box_nums_for_evaluation\n\n # the period for visualizing training samples\n self.input_format = input_format\n self.vis_period = vis_period\n if vis_period > 0:\n assert input_format is not None, \"input_format is required for visualization!\"\n\n\n def forward(self, batched_inputs):\n \"\"\"Forward function of `DINO` which excepts a list of dict as inputs.\n\n Args:\n batched_inputs (List[dict]): A list of instance dict, and each instance dict must consists of:\n - dict[\"image\"] (torch.Tensor): The unnormalized image tensor.\n - dict[\"height\"] (int): The original image height.\n - 
dict[\"width\"] (int): The original image width.\n - dict[\"instance\"] (detectron2.structures.Instances):\n Image meta informations and ground truth boxes and labels during training.\n Please refer to\n https://detectron2.readthedocs.io/en/latest/modules/structures.html#detectron2.structures.Instances\n for the basic usage of Instances.\n\n Returns:\n dict: Returns a dict with the following elements:\n - dict[\"pred_logits\"]: the classification logits for all queries (anchor boxes in DAB-DETR).\n with shape ``[batch_size, num_queries, num_classes]``\n - dict[\"pred_boxes\"]: The normalized boxes coordinates for all queries in format\n ``(x, y, w, h)``. These values are normalized in [0, 1] relative to the size of\n each individual image (disregarding possible padding). See PostProcess for information\n on how to retrieve the unnormalized bounding box.\n - dict[\"aux_outputs\"]: Optional, only returned when auxilary losses are activated. It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n \"\"\"\n images = self.preprocess_image(batched_inputs)\n\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n img_h, img_w = batched_inputs[img_id][\"instances\"].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n\n # original features\n features = self.backbone(images.tensor) # output feature dict\n\n # project backbone features to the reuired dimension of transformer\n # we use multi-scale features in DINO\n multi_level_feats = self.neck(features)\n multi_level_masks = []\n multi_level_position_embeddings = []\n for feat in multi_level_feats:\n multi_level_masks.append(\n F.interpolate(img_masks[None], size=feat.shape[-2:]).to(torch.bool).squeeze(0)\n )\n multi_level_position_embeddings.append(self.position_embedding(multi_level_masks[-1]))\n\n # denoising preprocessing\n # prepare label query embedding\n if self.training:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n input_query_label, input_query_bbox, attn_mask, dn_meta = self.prepare_for_cdn(\n targets,\n dn_number=self.dn_number,\n label_noise_ratio=self.label_noise_ratio,\n box_noise_scale=self.box_noise_scale,\n num_queries=self.num_queries,\n num_classes=self.num_classes,\n hidden_dim=self.embed_dim,\n label_enc=self.label_enc,\n )\n else:\n input_query_label, input_query_bbox, attn_mask, dn_meta = None, None, None, None\n query_embeds = (input_query_label, input_query_bbox)\n\n # feed into transformer\n (\n inter_states,\n init_reference,\n inter_references,\n enc_state,\n enc_reference, # [0..1]\n ) = self.transformer(\n multi_level_feats,\n multi_level_masks,\n multi_level_position_embeddings,\n query_embeds,\n attn_masks=[attn_mask, None],\n )\n # hack implementation for distributed training\n inter_states[0] += self.label_enc.weight[0, 0] * 0.0\n\n # Calculate output coordinates and classes.\n outputs_classes = []\n outputs_coords = []\n for lvl in range(inter_states.shape[0]):\n if lvl == 0:\n reference = init_reference\n else:\n reference = inter_references[lvl - 1]\n reference = inverse_sigmoid(reference)\n outputs_class = self.class_embed[lvl](inter_states[lvl])\n tmp = self.bbox_embed[lvl](inter_states[lvl])\n if reference.shape[-1] == 4:\n tmp += reference\n else:\n assert reference.shape[-1] == 2\n tmp[..., 
:2] += reference\n outputs_coord = tmp.sigmoid()\n outputs_classes.append(outputs_class)\n outputs_coords.append(outputs_coord)\n outputs_class = torch.stack(outputs_classes)\n # tensor shape: [num_decoder_layers, bs, num_query, num_classes]\n outputs_coord = torch.stack(outputs_coords)\n # tensor shape: [num_decoder_layers, bs, num_query, 4]\n\n # denoising postprocessing\n if dn_meta is not None:\n outputs_class, outputs_coord = self.dn_post_process(\n outputs_class, outputs_coord, dn_meta\n )\n\n # prepare for loss computation\n output = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": outputs_coord[-1]}\n if self.aux_loss:\n output[\"aux_outputs\"] = self._set_aux_loss(outputs_class, outputs_coord)\n\n # prepare two stage output\n interm_coord = enc_reference\n interm_class = self.transformer.decoder.class_embed[-1](enc_state)\n output[\"enc_outputs\"] = {\"pred_logits\": interm_class, \"pred_boxes\": interm_coord}\n\n if self.training:\n # visualize training samples\n if self.vis_period > 0:\n storage = get_event_storage()\n if storage.iter % self.vis_period == 0:\n box_cls = output[\"pred_logits\"]\n box_pred = output[\"pred_boxes\"]\n results = self.inference(box_cls, box_pred, images.image_sizes)\n self.visualize_training(batched_inputs, results)\n \n # compute loss\n loss_dict = self.criterion(output, targets, dn_meta)\n weight_dict = self.criterion.weight_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output[\"pred_logits\"]\n box_pred = output[\"pred_boxes\"]\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r})\n return processed_results\n\n def visualize_training(self, batched_inputs, results):\n from detectron2.utils.visualizer import Visualizer\n\n storage = get_event_storage()\n max_vis_box = 20\n\n for input, results_per_image in zip(batched_inputs, results):\n img = input[\"image\"]\n img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)\n v_gt = Visualizer(img, None)\n v_gt = v_gt.overlay_instances(boxes=input[\"instances\"].gt_boxes)\n anno_img = v_gt.get_image()\n v_pred = Visualizer(img, None)\n v_pred = v_pred.overlay_instances(\n boxes=results_per_image.pred_boxes[:max_vis_box].tensor.detach().cpu().numpy()\n )\n pred_img = v_pred.get_image()\n vis_img = np.concatenate((anno_img, pred_img), axis=1)\n vis_img = vis_img.transpose(2, 0, 1)\n vis_name = \"Left: GT bounding boxes; Right: Predicted boxes\"\n storage.put_image(vis_name, vis_img)\n break # only visualize one image in a batch\n\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [\n {\"pred_logits\": a, \"pred_boxes\": b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])\n ]\n\n def prepare_for_cdn(\n self,\n targets,\n dn_number,\n label_noise_ratio,\n box_noise_scale,\n num_queries,\n num_classes,\n hidden_dim,\n label_enc,\n ):\n \"\"\"\n A major difference of DINO from DN-DETR is that the author process pattern embedding 
pattern embedding\n in its detector\n forward function and use learnable tgt embedding, so we change this function a little bit.\n :param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale\n :param training: if it is training or inference\n :param num_queries: number of queires\n :param num_classes: number of classes\n :param hidden_dim: transformer hidden dim\n :param label_enc: encode labels in dn\n :return:\n \"\"\"\n if dn_number <= 0:\n return None, None, None, None\n # positive and negative dn queries\n dn_number = dn_number * 2\n known = [(torch.ones_like(t[\"labels\"])).cuda() for t in targets]\n batch_size = len(known)\n known_num = [sum(k) for k in known]\n if int(max(known_num)) == 0:\n return None, None, None, None\n\n dn_number = dn_number // (int(max(known_num) * 2))\n\n if dn_number == 0:\n dn_number = 1\n unmask_bbox = unmask_label = torch.cat(known)\n labels = torch.cat([t[\"labels\"] for t in targets])\n boxes = torch.cat([t[\"boxes\"] for t in targets])\n batch_idx = torch.cat(\n [torch.full_like(t[\"labels\"].long(), i) for i, t in enumerate(targets)]\n )\n\n known_indice = torch.nonzero(unmask_label + unmask_bbox)\n known_indice = known_indice.view(-1)\n\n known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)\n known_labels = labels.repeat(2 * dn_number, 1).view(-1)\n known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)\n known_bboxs = boxes.repeat(2 * dn_number, 1)\n known_labels_expaned = known_labels.clone()\n known_bbox_expand = known_bboxs.clone()\n\n if label_noise_ratio > 0:\n p = torch.rand_like(known_labels_expaned.float())\n chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(\n -1\n ) # half of bbox prob\n new_label = torch.randint_like(\n chosen_indice, 0, num_classes\n ) # randomly put a new one here\n known_labels_expaned.scatter_(0, chosen_indice, new_label)\n single_padding = int(max(known_num))\n\n pad_size = int(single_padding * 2 * dn_number)\n positive_idx = (\n torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)\n )\n positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)\n positive_idx = positive_idx.flatten()\n negative_idx = positive_idx + len(boxes)\n if box_noise_scale > 0:\n known_bbox_ = torch.zeros_like(known_bboxs)\n known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2\n known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2\n\n diff = torch.zeros_like(known_bboxs)\n diff[:, :2] = known_bboxs[:, 2:] / 2\n diff[:, 2:] = known_bboxs[:, 2:] / 2\n\n rand_sign = (\n torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0\n )\n rand_part = torch.rand_like(known_bboxs)\n rand_part[negative_idx] += 1.0\n rand_part *= rand_sign\n known_bbox_ = known_bbox_ + torch.mul(rand_part, diff).cuda() * box_noise_scale\n known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)\n known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2\n known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]\n\n m = known_labels_expaned.long().to(\"cuda\")\n input_label_embed = label_enc(m)\n input_bbox_embed = inverse_sigmoid(known_bbox_expand)\n\n padding_label = torch.zeros(pad_size, hidden_dim).cuda()\n padding_bbox = torch.zeros(pad_size, 4).cuda()\n\n input_query_label = padding_label.repeat(batch_size, 1, 1)\n input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)\n\n map_known_indice = torch.tensor([]).to(\"cuda\")\n if len(known_num):\n map_known_indice = torch.cat(\n [torch.tensor(range(num)) for 
num in known_num]\n ) # [1,2, 1,2,3]\n map_known_indice = torch.cat(\n [map_known_indice + single_padding * i for i in range(2 * dn_number)]\n ).long()\n if len(known_bid):\n input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed\n input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed\n\n tgt_size = pad_size + num_queries\n attn_mask = torch.ones(tgt_size, tgt_size).to(\"cuda\") < 0\n # match query cannot see the reconstruct\n attn_mask[pad_size:, :pad_size] = True\n # reconstruct cannot see each other\n for i in range(dn_number):\n if i == 0:\n attn_mask[\n single_padding * 2 * i : single_padding * 2 * (i + 1),\n single_padding * 2 * (i + 1) : pad_size,\n ] = True\n if i == dn_number - 1:\n attn_mask[\n single_padding * 2 * i : single_padding * 2 * (i + 1), : single_padding * i * 2\n ] = True\n else:\n attn_mask[\n single_padding * 2 * i : single_padding * 2 * (i + 1),\n single_padding * 2 * (i + 1) : pad_size,\n ] = True\n attn_mask[\n single_padding * 2 * i : single_padding * 2 * (i + 1), : single_padding * 2 * i\n ] = True\n\n dn_meta = {\n \"single_padding\": single_padding * 2,\n \"dn_num\": dn_number,\n }\n\n return input_query_label, input_query_bbox, attn_mask, dn_meta\n\n def dn_post_process(self, outputs_class, outputs_coord, dn_metas):\n if dn_metas and dn_metas[\"single_padding\"] > 0:\n padding_size = dn_metas[\"single_padding\"] * dn_metas[\"dn_num\"]\n output_known_class = outputs_class[:, :, :padding_size, :]\n output_known_coord = outputs_coord[:, :, :padding_size, :]\n outputs_class = outputs_class[:, :, padding_size:, :]\n outputs_coord = outputs_coord[:, :, padding_size:, :]\n\n out = {\"pred_logits\": output_known_class[-1], \"pred_boxes\": output_known_coord[-1]}\n if self.aux_loss:\n out[\"aux_outputs\"] = self._set_aux_loss(output_known_class, output_known_coord)\n dn_metas[\"output_known_lbs_bboxes\"] = out\n return outputs_class, outputs_coord\n\n def preprocess_image(self, batched_inputs):\n images = [self.normalizer(x[\"image\"].to(self.device)) for x in batched_inputs]\n images = ImageList.from_tensors(images)\n return images\n\n def inference(self, box_cls, box_pred, image_sizes):\n \"\"\"\n Arguments:\n box_cls (Tensor): tensor of shape (batch_size, num_queries, K).\n The tensor predicts the classification probability for each query.\n box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).\n The tensor predicts 4-vector (x,y,w,h) box\n regression values for every queryx\n image_sizes (List[torch.Size]): the input image sizes\n\n Returns:\n results (List[Instances]): a list of #images elements.\n \"\"\"\n assert len(box_cls) == len(image_sizes)\n results = []\n\n # box_cls.shape: 1, 300, 80\n # box_pred.shape: 1, 300, 4\n prob = box_cls.sigmoid()\n topk_values, topk_indexes = torch.topk(\n prob.view(box_cls.shape[0], -1), self.select_box_nums_for_evaluation, dim=1\n )\n scores = topk_values\n topk_boxes = torch.div(topk_indexes, box_cls.shape[2], rounding_mode=\"floor\")\n labels = topk_indexes % box_cls.shape[2]\n\n boxes = torch.gather(box_pred, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n\n # For each box we assign the best class or the second best if the best on is `no_object`.\n # scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1)\n\n for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(\n zip(scores, labels, boxes, image_sizes)\n ):\n result = Instances(image_size)\n result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))\n\n 
result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])\n result.scores = scores_per_image\n result.pred_classes = labels_per_image\n results.append(result)\n return results\n\n def prepare_targets(self, targets):\n new_targets = []\n for targets_per_image in targets:\n h, w = targets_per_image.image_size\n image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)\n gt_classes = targets_per_image.gt_classes\n gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy\n gt_boxes = box_xyxy_to_cxcywh(gt_boxes)\n new_targets.append({\"labels\": gt_classes, \"boxes\": gt_boxes})\n return new_targets" }, { "identifier": "DINOCriterion", "path": "projects/dino/modeling/dn_criterion.py", "snippet": "class DINOCriterion(TwoStageCriterion):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n\n def forward(self, outputs, targets, dn_metas=None):\n \"\"\"This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n losses = super(DINOCriterion, self).forward(outputs, targets)\n # import pdb;pdb.set_trace()\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor(\n [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n\n aux_num = 0\n if \"aux_outputs\" in outputs:\n aux_num = len(outputs[\"aux_outputs\"])\n dn_losses = self.compute_dn_loss(dn_metas, targets, aux_num, num_boxes)\n losses.update(dn_losses)\n\n return losses\n\n def compute_dn_loss(self, dn_metas, targets, aux_num, num_boxes):\n \"\"\"\n compute dn loss in criterion\n Args:\n dn_metas: a dict for dn information\n training: training or inference flag\n aux_num: aux loss number\n focal_alpha: for focal loss\n \"\"\"\n losses = {}\n if dn_metas and \"output_known_lbs_bboxes\" in dn_metas:\n output_known_lbs_bboxes, dn_num, single_padding = (\n dn_metas[\"output_known_lbs_bboxes\"],\n dn_metas[\"dn_num\"],\n dn_metas[\"single_padding\"],\n )\n dn_idx = []\n for i in range(len(targets)):\n if len(targets[i][\"labels\"]) > 0:\n t = torch.arange(0, len(targets[i][\"labels\"])).long().cuda()\n t = t.unsqueeze(0).repeat(dn_num, 1)\n tgt_idx = t.flatten()\n output_idx = (\n torch.tensor(range(dn_num)) * single_padding\n ).long().cuda().unsqueeze(1) + t\n output_idx = output_idx.flatten()\n else:\n output_idx = tgt_idx = torch.tensor([]).long().cuda()\n\n dn_idx.append((output_idx, tgt_idx))\n l_dict = {}\n for loss in self.losses:\n kwargs = {}\n if \"labels\" in loss:\n kwargs = {\"log\": False}\n l_dict.update(\n self.get_loss(\n loss, output_known_lbs_bboxes, targets, dn_idx, num_boxes * dn_num, **kwargs\n )\n )\n\n l_dict = {k + \"_dn\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n else:\n losses[\"loss_bbox_dn\"] = torch.as_tensor(0.0).to(\"cuda\")\n losses[\"loss_giou_dn\"] = torch.as_tensor(0.0).to(\"cuda\")\n losses[\"loss_class_dn\"] = torch.as_tensor(0.0).to(\"cuda\")\n\n for i in 
range(aux_num):\n # dn aux loss\n l_dict = {}\n if dn_metas and \"output_known_lbs_bboxes\" in dn_metas:\n output_known_lbs_bboxes_aux = output_known_lbs_bboxes[\"aux_outputs\"][i]\n for loss in self.losses:\n kwargs = {}\n if \"labels\" in loss:\n kwargs = {\"log\": False}\n l_dict.update(\n self.get_loss(\n loss,\n output_known_lbs_bboxes_aux,\n targets,\n dn_idx,\n num_boxes * dn_num,\n **kwargs,\n )\n )\n l_dict = {k + f\"_dn_{i}\": v for k, v in l_dict.items()}\n else:\n l_dict[\"loss_bbox_dn\"] = torch.as_tensor(0.0).to(\"cuda\")\n l_dict[\"loss_giou_dn\"] = torch.as_tensor(0.0).to(\"cuda\")\n l_dict[\"loss_class_dn\"] = torch.as_tensor(0.0).to(\"cuda\")\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n return losses" } ]
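The forward pass quoted above refines boxes iteratively: at each decoder level the bbox head predicts a delta that is added to the reference in logit space and squashed back through a sigmoid. A minimal, self-contained sketch of that step follows; the inverse_sigmoid helper here is a local stand-in for the detrex utility referenced in the snippet, so its exact epsilon/clamping may differ.

import torch

def inverse_sigmoid(x, eps=1e-5):
    # Local stand-in: map normalized coordinates in (0, 1) back to logit space.
    x = x.clamp(min=0, max=1)
    return torch.log(x.clamp(min=eps) / (1 - x).clamp(min=eps))

# Toy refinement step mirroring `tmp += inverse_sigmoid(reference); outputs_coord = tmp.sigmoid()`.
reference = torch.rand(2, 900, 4)      # normalized (cx, cy, w, h) anchor boxes
delta = 0.1 * torch.randn(2, 900, 4)   # stand-in for self.bbox_embed[lvl](inter_states[lvl])
refined = (delta + inverse_sigmoid(reference)).sigmoid()
assert refined.min() >= 0 and refined.max() <= 1  # still valid normalized boxes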
import copy import torch.nn as nn from detectron2.modeling.backbone import ResNet, BasicStem from detectron2.layers import ShapeSpec from detectron2.config import LazyCall as L from detrex.modeling.matcher import HungarianMatcher from detrex.modeling.neck import ChannelMapper from detrex.layers import PositionEmbeddingSine from projects.dino.modeling import ( DINO, DINOTransformerEncoder, DINOTransformerDecoder, DINOTransformer, DINOCriterion, )
15,951
model = L(DINO)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(DINOTransformer)( encoder=L(DINOTransformerEncoder)( embed_dim=256, num_heads=8, feedforward_dim=2048, attn_dropout=0.0, ffn_dropout=0.0, num_layers=6, post_norm=False, num_feature_levels="${..num_feature_levels}", use_checkpoint=False ), decoder=L(DINOTransformerDecoder)( embed_dim=256, num_heads=8, feedforward_dim=2048, attn_dropout=0.0, ffn_dropout=0.0, num_layers=6, return_intermediate=True, num_feature_levels="${..num_feature_levels}", use_checkpoint=False, ), num_feature_levels=4, two_stage_num_proposals="${..num_queries}", ), embed_dim=256, num_classes=80, num_queries=900, aux_loss=True,
model = L(DINO)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(DINOTransformer)( encoder=L(DINOTransformerEncoder)( embed_dim=256, num_heads=8, feedforward_dim=2048, attn_dropout=0.0, ffn_dropout=0.0, num_layers=6, post_norm=False, num_feature_levels="${..num_feature_levels}", use_checkpoint=False ), decoder=L(DINOTransformerDecoder)( embed_dim=256, num_heads=8, feedforward_dim=2048, attn_dropout=0.0, ffn_dropout=0.0, num_layers=6, return_intermediate=True, num_feature_levels="${..num_feature_levels}", use_checkpoint=False, ), num_feature_levels=4, two_stage_num_proposals="${..num_queries}", ), embed_dim=256, num_classes=80, num_queries=900, aux_loss=True,
criterion=L(DINOCriterion)(
7
2023-10-12 03:02:25+00:00
24k
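For context on the fields above: next_line (`criterion=L(DINOCriterion)(`) simply continues the LazyCall config shown in cropped_code/all_code. A hedged sketch of how such a config is normally turned into a live model with detectron2's lazy-config API follows; the config path is hypothetical, and only the LazyCall usage itself comes from the record.

# Hedged sketch, not part of the record: instantiating a LazyCall-style config.
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("projects/dino/configs/models/dino_r50.py")  # hypothetical path
model = instantiate(cfg.model)  # recursively builds DINO, its backbone, neck, transformer and criterion
model.eval()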
sakemin/cog-musicgen-remixer
predict.py
[ { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.\n \"\"\"\n def __init__(self, DPs: tp.List[DiffusionProcess], codec_model: CompressionModel) -> None:\n self.DPs = DPs\n self.codec_model = codec_model\n self.device = next(self.codec_model.parameters()).device\n\n @property\n def sample_rate(self) -> int:\n return self.codec_model.sample_rate\n\n @staticmethod\n def get_mbd_musicgen(device=None):\n \"\"\"Load our diffusion models trained for MusicGen.\"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n path = 'facebook/multiband-diffusion'\n filename = 'mbd_musicgen_32khz.th'\n name = 'facebook/musicgen-small'\n codec_model = load_compression_model(name, device=device)\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n @staticmethod\n def get_mbd_24khz(bw: float = 3.0, pretrained: bool = True,\n device: tp.Optional[tp.Union[torch.device, str]] = None,\n n_q: tp.Optional[int] = None):\n \"\"\"Get the pretrained Models for MultibandDiffusion.\n\n Args:\n bw (float): Bandwidth of the compression model.\n pretrained (bool): Whether to use / download if necessary the models.\n device (torch.device or str, optional): Device on which the models are loaded.\n n_q (int, optional): Number of quantizers to use within the compression model.\n \"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n assert bw in [1.5, 3.0, 6.0], f\"bandwidth {bw} not available\"\n if n_q is not None:\n assert n_q in [2, 4, 8]\n assert {1.5: 2, 3.0: 4, 6.0: 8}[bw] == n_q, \\\n f\"bandwidth and number of codebooks missmatch to use n_q = {n_q} bw should be {n_q * (1.5 / 2)}\"\n n_q = {1.5: 2, 3.0: 4, 6.0: 8}[bw]\n codec_model = CompressionSolver.model_from_checkpoint(\n '//pretrained/facebook/encodec_24khz', device=device)\n codec_model.set_num_codebooks(n_q)\n codec_model = codec_model.to(device)\n path = 'facebook/multiband-diffusion'\n filename = f'mbd_comp_{n_q}.pt'\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n return MultiBandDiffusion(DPs, codec_model)\n\n @torch.no_grad()\n def get_condition(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:\n \"\"\"Get the conditioning (i.e. 
latent reprentatios of the compression model) from a waveform.\n Args:\n wav (torch.Tensor): The audio that we want to extract the conditioning from\n sample_rate (int): sample rate of the audio\"\"\"\n if sample_rate != self.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.sample_rate)\n codes, scale = self.codec_model.encode(wav)\n assert scale is None, \"Scaled compression models not supported.\"\n emb = self.get_emb(codes)\n return emb\n\n @torch.no_grad()\n def get_emb(self, codes: torch.Tensor):\n \"\"\"Get latent representation from the discrete codes\n Argrs:\n codes (torch.Tensor): discrete tokens\"\"\"\n emb = self.codec_model.decode_latent(codes)\n return emb\n\n def generate(self, emb: torch.Tensor, size: tp.Optional[torch.Size] = None,\n step_list: tp.Optional[tp.List[int]] = None):\n \"\"\"Generate Wavform audio from the latent embeddings of the compression model\n Args:\n emb (torch.Tensor): Conditioning embeddinds\n size (none torch.Size): size of the output\n if None this is computed from the typical upsampling of the model\n step_list (optional list[int]): list of Markov chain steps, defaults to 50 linearly spaced step.\n \"\"\"\n if size is None:\n upsampling = int(self.codec_model.sample_rate / self.codec_model.frame_rate)\n size = torch.Size([emb.size(0), self.codec_model.channels, emb.size(-1) * upsampling])\n assert size[0] == emb.size(0)\n out = torch.zeros(size).to(self.device)\n for DP in self.DPs:\n out += DP.generate(condition=emb, step_list=step_list, initial_noise=torch.randn_like(out))\n return out\n\n def re_eq(self, wav: torch.Tensor, ref: torch.Tensor, n_bands: int = 32, strictness: float = 1):\n \"\"\"match the eq to the encodec output by matching the standard deviation of some frequency bands\n Args:\n wav (torch.Tensor): audio to equalize\n ref (torch.Tensor):refenrence audio from which we match the spectrogram.\n n_bands (int): number of bands of the eq\n strictness (float): how strict the the matching. 
0 is no matching, 1 is exact matching.\n \"\"\"\n split = julius.SplitBands(n_bands=n_bands, sample_rate=self.codec_model.sample_rate).to(wav.device)\n bands = split(wav)\n bands_ref = split(ref)\n out = torch.zeros_like(ref)\n for i in range(n_bands):\n out += bands[i] * (bands_ref[i].std() / bands[i].std()) ** strictness\n return out\n\n def regenerate(self, wav: torch.Tensor, sample_rate: int):\n \"\"\"Regenerate a wavform through compression and diffusion regeneration.\n Args:\n wav (torch.Tensor): Original 'ground truth' audio\n sample_rate (int): sample rate of the input (and output) wav\n \"\"\"\n if sample_rate != self.codec_model.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.codec_model.sample_rate)\n emb = self.get_condition(wav, sample_rate=self.codec_model.sample_rate)\n size = wav.size()\n out = self.generate(emb, size=size)\n if sample_rate != self.codec_model.sample_rate:\n out = julius.resample_frac(out, self.codec_model.sample_rate, sample_rate)\n return out\n\n def tokens_to_wav(self, tokens: torch.Tensor, n_bands: int = 32):\n \"\"\"Generate Waveform audio with diffusion from the discrete codes.\n Args:\n tokens (torch.Tensor): discrete codes\n n_bands (int): bands for the eq matching.\n \"\"\"\n wav_encodec = self.codec_model.decode(tokens)\n condition = self.get_emb(tokens)\n wav_diffusion = self.generate(emb=condition, size=wav_encodec.size())\n return self.re_eq(wav=wav_diffusion, ref=wav_encodec, n_bands=n_bands)" }, { "identifier": "MusicGen", "path": "audiocraft/models/musicgen.py", "snippet": "class MusicGen:\n \"\"\"MusicGen main model with convenient generation API.\n\n Args:\n name (str): name of the model.\n compression_model (CompressionModel): Compression model\n used to map audio to invertible discrete representations.\n lm (LMModel): Language model over discrete representations.\n max_duration (float, optional): maximum duration the model can produce,\n otherwise, inferred from the training params.\n \"\"\"\n def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,\n max_duration: tp.Optional[float] = None):\n self.name = name\n self.compression_model = compression_model\n self.lm = lm\n self.cfg: tp.Optional[omegaconf.DictConfig] = None\n # Just to be safe, let's put everything in eval mode.\n self.compression_model.eval()\n self.lm.eval()\n\n if hasattr(lm, 'cfg'):\n cfg = lm.cfg\n assert isinstance(cfg, omegaconf.DictConfig)\n self.cfg = cfg\n\n if self.cfg is not None:\n self.compression_model = get_wrapped_compression_model(self.compression_model, self.cfg)\n\n if max_duration is None:\n if self.cfg is not None:\n max_duration = lm.cfg.dataset.segment_duration # type: ignore\n else:\n raise ValueError(\"You must provide max_duration when building directly MusicGen\")\n assert max_duration is not None\n self.max_duration: float = max_duration\n self.device = next(iter(lm.parameters())).device\n\n self.generation_params: dict = {}\n self.set_generation_params(duration=15) # 15 seconds by default\n self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None\n if self.device.type == 'cpu':\n self.autocast = TorchAutocast(enabled=False)\n else:\n self.autocast = TorchAutocast(\n enabled=True, device_type=self.device.type, dtype=torch.float16)\n\n @property\n def frame_rate(self) -> float:\n \"\"\"Roughly the number of AR steps per seconds.\"\"\"\n return self.compression_model.frame_rate\n\n @property\n def sample_rate(self) -> int:\n \"\"\"Sample rate of the generated audio.\"\"\"\n return 
self.compression_model.sample_rate\n\n @property\n def audio_channels(self) -> int:\n \"\"\"Audio channels of the generated audio.\"\"\"\n return self.compression_model.channels\n\n @staticmethod\n def get_pretrained(name: str = 'facebook/musicgen-melody', device=None):\n \"\"\"Return pretrained model, we provide four models:\n - facebook/musicgen-small (300M), text to music,\n # see: https://huggingface.co/facebook/musicgen-small\n - facebook/musicgen-medium (1.5B), text to music,\n # see: https://huggingface.co/facebook/musicgen-medium\n - facebook/musicgen-melody (1.5B) text to music and text+melody to music,\n # see: https://huggingface.co/facebook/musicgen-melody\n - facebook/musicgen-large (3.3B), text to music,\n # see: https://huggingface.co/facebook/musicgen-large\n \"\"\"\n if device is None:\n if torch.cuda.device_count():\n device = 'cuda'\n else:\n device = 'cpu'\n\n if name == 'debug':\n # used only for unit tests\n compression_model = get_debug_compression_model(device)\n lm = get_debug_lm_model(device)\n return MusicGen(name, compression_model, lm, max_duration=30)\n\n if name in _HF_MODEL_CHECKPOINTS_MAP:\n warnings.warn(\n \"MusicGen pretrained model relying on deprecated checkpoint mapping. \" +\n f\"Please use full pre-trained id instead: facebook/musicgen-{name}\")\n name = _HF_MODEL_CHECKPOINTS_MAP[name]\n\n lm = load_lm_model(name, device=device)\n compression_model = load_compression_model(name, device=device)\n if 'self_wav' in lm.condition_provider.conditioners:\n lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True\n lm.condition_provider.conditioners['self_wav']._use_masking = False\n\n return MusicGen(name, compression_model, lm)\n\n def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,\n top_p: float = 0.0, temperature: float = 1.0,\n duration: float = 30.0, cfg_coef: float = 3.0,\n two_step_cfg: bool = False, extend_stride: float = 18):\n \"\"\"Set the generation parameters for MusicGen.\n\n Args:\n use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.\n top_k (int, optional): top_k used for sampling. Defaults to 250.\n top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.\n temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.\n duration (float, optional): Duration of the generated waveform. Defaults to 30.0.\n cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.\n two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,\n instead of batching together the two. This has some impact on how things\n are padded but seems to have little impact in practice.\n extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much\n should we extend the audio each time. 
Larger values will mean less context is\n preserved, and shorter value will require extra computations.\n \"\"\"\n assert extend_stride < self.max_duration, \"Cannot stride by more than max generation duration.\"\n self.extend_stride = extend_stride\n self.duration = duration\n self.generation_params = {\n 'use_sampling': use_sampling,\n 'temp': temperature,\n 'top_k': top_k,\n 'top_p': top_p,\n 'cfg_coef': cfg_coef,\n 'two_step_cfg': two_step_cfg,\n }\n\n def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):\n \"\"\"Override the default progress callback.\"\"\"\n self._progress_callback = progress_callback\n\n def generate_unconditional(self, num_samples: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples in an unconditional manner.\n\n Args:\n num_samples (int): Number of samples to be generated.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n descriptions: tp.List[tp.Optional[str]] = [None] * num_samples\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType,\n melody_sample_rate: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=melody_wavs)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,\n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_continuation_with_audio_token(self, prompt, \n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt, melody_wavs=melody_wavs)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_audio_chroma(self, prompt, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, melody_wavs=melody_wavs)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_text_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_text_chroma(self, prompt, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. 
It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n \n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_with_text_chroma(self, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n @torch.no_grad()\n def _prepare_tokens_and_attributes(\n self,\n descriptions: tp.Sequence[tp.Optional[str]],\n prompt: tp.Optional[torch.Tensor],\n melody_wavs: tp.Optional[tp.Union[MelodyList,tp.List[str]]] = None, bpm: tp.Optional[tp.Union[float,int,tp.List[float],tp.List[int]]] = None, meter:tp.Optional[tp.Union[int,tp.List[int]]] = None\n ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:\n \"\"\"Prepare model inputs.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n melody_wavs (torch.Tensor, optional): A batch of waveforms\n used as melody conditioning. Defaults to None.\n \"\"\"\n attributes = [\n ConditioningAttributes(text={'description': description})\n for description in descriptions]\n\n if melody_wavs is None:\n for attr in attributes:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n else:\n if 'self_wav' not in self.lm.condition_provider.conditioners:\n raise RuntimeError(\"This model doesn't support melody conditioning. \"\n \"Use the `melody` model.\")\n assert len(melody_wavs) == len(descriptions), \\\n f\"number of melody wavs must match number of descriptions! 
\" \\\n f\"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}\"\n\n if bpm is not None and (isinstance(bpm, int) or isinstance(bpm, float)):\n bpm = [bpm for i in range(len(melody_wavs))]\n elif bpm is not None and isinstance(bpm, tp.List):\n assert len(melody_wavs) == len(bpm)\n\n if meter is not None and (isinstance(meter, int) or isinstance(meter, float)):\n meter = [meter for i in range(len(melody_wavs))]\n elif meter is not None and isinstance(meter, tp.List):\n assert len(melody_wavs) == len(meter)\n\n for attr, melody, i in zip(attributes, melody_wavs, range(len(melody_wavs))):\n if melody is None:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n elif isinstance(melody, torch.Tensor):\n attr.wav['self_wav'] = WavCondition(\n melody[None].to(device=self.device),\n torch.tensor([melody.shape[-1]], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n )\n else :\n attr.wav['self_wav'] = WavChordTextCondition(\n [melody],\n torch.tensor([self.duration*self.sample_rate], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n bpm = [bpm[i]],\n meter = [meter[i]]\n )\n\n if prompt is not None:\n if descriptions is not None:\n assert len(descriptions) == len(prompt), \"Prompt and nb. descriptions doesn't match\"\n prompt = prompt.to(self.device)\n prompt_tokens, scale = self.compression_model.encode(prompt)\n assert scale is None\n else:\n prompt_tokens = None\n return attributes, prompt_tokens\n\n def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],\n prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:\n \"\"\"Generate discrete audio tokens given audio prompt and/or conditions.\n\n Args:\n attributes (list of ConditioningAttributes): Conditions used for generation (text/melody).\n prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n Returns:\n torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.\n \"\"\"\n total_gen_len = int(self.duration * self.frame_rate)\n max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)\n current_gen_offset: int = 0\n\n def _progress_callback(generated_tokens: int, tokens_to_generate: int):\n generated_tokens += current_gen_offset\n if self._progress_callback is not None:\n # Note that total_gen_len might be quite wrong depending on the\n # codebook pattern used, but with delay it is almost accurate.\n self._progress_callback(generated_tokens, total_gen_len)\n else:\n print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\\r')\n\n if prompt_tokens is not None:\n assert max_prompt_len >= prompt_tokens.shape[-1], \\\n \"Prompt is longer than audio to generate\"\n\n callback = None\n if progress:\n callback = _progress_callback\n\n if self.duration <= self.max_duration:\n # generate by sampling from LM, simple case.\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=total_gen_len, **self.generation_params)\n\n else:\n # now this gets a bit messier, we need to handle prompts,\n # melody conditioning etc.\n ref_wavs = [attr.wav['self_wav'] for attr in attributes]\n all_tokens = []\n if prompt_tokens is None:\n prompt_length = 0\n else:\n all_tokens.append(prompt_tokens)\n prompt_length = prompt_tokens.shape[-1]\n\n stride_tokens = int(self.frame_rate * self.extend_stride)\n step = 0\n\n while current_gen_offset + prompt_length < total_gen_len:\n self.lm.condition_provider.conditioners['self_wav'].set_continuation_count(self.extend_stride/self.max_duration, step) #For text based chord conditioning\n time_offset = current_gen_offset / self.frame_rate\n chunk_duration = min(self.duration - time_offset, self.max_duration)\n max_gen_len = int(chunk_duration * self.frame_rate)\n for attr, ref_wav in zip(attributes, ref_wavs):\n if isinstance(ref_wav, WavCondition):\n wav_length = ref_wav.length.item()\n if wav_length == 0:\n continue\n # We will extend the wav periodically if it not long enough.\n # we have to do it here rather than in conditioners.py as otherwise\n # we wouldn't have the full wav.\n initial_position = int(time_offset * self.sample_rate)\n wav_target_length = int(self.max_duration * self.sample_rate)\n positions = torch.arange(initial_position,\n initial_position + wav_target_length, device=self.device)\n attr.wav['self_wav'] = WavCondition(\n ref_wav[0][..., positions % wav_length],\n torch.full_like(ref_wav[1], wav_target_length),\n [self.sample_rate] * ref_wav[0].size(0),\n [None], [0.])\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=max_gen_len, **self.generation_params)\n if prompt_tokens is None:\n all_tokens.append(gen_tokens)\n else:\n all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])\n prompt_tokens = gen_tokens[:, :, stride_tokens:]\n prompt_length = prompt_tokens.shape[-1]\n current_gen_offset += stride_tokens\n step = step + 1\n\n gen_tokens = torch.cat(all_tokens, dim=-1)\n return gen_tokens\n\n def generate_audio(self, gen_tokens: torch.Tensor):\n \"\"\"Generate Audio from tokens\"\"\"\n assert gen_tokens.dim() == 3\n with torch.no_grad():\n gen_audio = self.compression_model.decode(gen_tokens, None)\n return gen_audio" }, { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class 
CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an encoder-decoder and a quantizer)\n to perform high fidelity audio reconstruction.\n \"\"\"\n def __init__(self, cfg: omegaconf.DictConfig):\n super().__init__(cfg)\n self.rng: torch.Generator # set at each epoch\n self.adv_losses = builders.get_adversarial_losses(self.cfg)\n self.aux_losses = nn.ModuleDict()\n self.info_losses = nn.ModuleDict()\n assert not cfg.fsdp.use, \"FSDP not supported by CompressionSolver.\"\n loss_weights = dict()\n for loss_name, weight in self.cfg.losses.items():\n if loss_name in ['adv', 'feat']:\n for adv_name, _ in self.adv_losses.items():\n loss_weights[f'{loss_name}_{adv_name}'] = weight\n elif weight > 0:\n self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n loss_weights[loss_name] = weight\n else:\n self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)\n self.register_stateful('adv_losses')\n\n @property\n def best_metric_name(self) -> tp.Optional[str]:\n # best model is the last for the compression model\n return None\n\n def build_model(self):\n \"\"\"Instantiate model and optimizer.\"\"\"\n # Model and optimizer\n self.model = models.builders.get_compression_model(self.cfg).to(self.device)\n self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)\n self.register_stateful('model', 'optimizer')\n self.register_best_state('model')\n self.register_ema('model')\n\n def build_dataloaders(self):\n \"\"\"Instantiate audio dataloaders for each stage.\"\"\"\n self.dataloaders = builders.get_audio_datasets(self.cfg)\n\n def show(self):\n \"\"\"Show the compression model and employed adversarial loss.\"\"\"\n self.logger.info(f\"Compression model with {self.model.quantizer.total_codebooks} codebooks:\")\n self.log_model_summary(self.model)\n self.logger.info(\"Adversarial loss:\")\n self.log_model_summary(self.adv_losses)\n self.logger.info(\"Auxiliary losses:\")\n self.logger.info(self.aux_losses)\n self.logger.info(\"Info losses:\")\n self.logger.info(self.info_losses)\n\n def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):\n \"\"\"Perform one training or valid step on a given batch.\"\"\"\n x = batch.to(self.device)\n y = x.clone()\n\n qres = self.model(x)\n assert isinstance(qres, quantization.QuantizedResult)\n y_pred = qres.x\n # Log bandwidth in kb/s\n metrics['bandwidth'] = qres.bandwidth.mean()\n\n if self.is_training:\n d_losses: dict = {}\n if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:\n for adv_name, adversary in self.adv_losses.items():\n disc_loss = adversary.train_adv(y_pred, y)\n d_losses[f'd_{adv_name}'] = disc_loss\n metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))\n metrics.update(d_losses)\n\n balanced_losses: dict = {}\n other_losses: dict = {}\n\n # penalty from quantization\n if qres.penalty is not None and qres.penalty.requires_grad:\n other_losses['penalty'] = qres.penalty # penalty term from the quantizer\n\n # adversarial losses\n for adv_name, adversary in self.adv_losses.items():\n adv_loss, feat_loss = adversary(y_pred, y)\n balanced_losses[f'adv_{adv_name}'] = adv_loss\n balanced_losses[f'feat_{adv_name}'] = feat_loss\n\n # auxiliary losses\n for loss_name, criterion in self.aux_losses.items():\n loss = criterion(y_pred, 
y)\n balanced_losses[loss_name] = loss\n\n # weighted losses\n metrics.update(balanced_losses)\n metrics.update(other_losses)\n metrics.update(qres.metrics)\n\n if self.is_training:\n # backprop losses that are not handled by balancer\n other_loss = torch.tensor(0., device=self.device)\n if 'penalty' in other_losses:\n other_loss += other_losses['penalty']\n if other_loss.requires_grad:\n other_loss.backward(retain_graph=True)\n ratio1 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio1, torch.Tensor)\n metrics['ratio1'] = ratio1.sqrt()\n\n # balancer losses backward, returns effective training loss\n # with effective weights at the current batch.\n metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)\n # add metrics corresponding to weight ratios\n metrics.update(self.balancer.metrics)\n ratio2 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio2, torch.Tensor)\n metrics['ratio2'] = ratio2.sqrt()\n\n # optim\n flashy.distrib.sync_model(self.model)\n if self.cfg.optim.max_norm:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.cfg.optim.max_norm\n )\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # informative losses only\n info_losses: dict = {}\n with torch.no_grad():\n for loss_name, criterion in self.info_losses.items():\n loss = criterion(y_pred, y)\n info_losses[loss_name] = loss\n\n metrics.update(info_losses)\n\n # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups\n adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]\n if len(adv_losses) > 0:\n metrics['adv'] = torch.sum(torch.stack(adv_losses))\n feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]\n if len(feat_losses) > 0:\n metrics['feat'] = torch.sum(torch.stack(feat_losses))\n\n return metrics\n\n def run_epoch(self):\n # reset random seed at the beginning of the epoch\n self.rng = torch.Generator()\n self.rng.manual_seed(1234 + self.epoch)\n # run epoch\n super().run_epoch()\n\n def evaluate(self):\n \"\"\"Evaluate stage. 
Runs audio reconstruction evaluation.\"\"\"\n self.model.eval()\n evaluate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['evaluate']\n updates = len(loader)\n lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)\n average = flashy.averager()\n\n pendings = []\n ctx = multiprocessing.get_context('spawn')\n with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:\n for idx, batch in enumerate(lp):\n x = batch.to(self.device)\n with torch.no_grad():\n qres = self.model(x)\n\n y_pred = qres.x.cpu()\n y = batch.cpu() # should already be on CPU but just in case\n pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))\n\n metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)\n for pending in metrics_lp:\n metrics = pending.result()\n metrics = average(metrics)\n\n metrics = flashy.distrib.average_metrics(metrics, len(loader))\n return metrics\n\n def generate(self):\n \"\"\"Generate stage.\"\"\"\n self.model.eval()\n sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)\n generate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['generate']\n updates = len(loader)\n lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)\n\n for batch in lp:\n reference, _ = batch\n reference = reference.to(self.device)\n with torch.no_grad():\n qres = self.model(reference)\n assert isinstance(qres, quantization.QuantizedResult)\n\n reference = reference.cpu()\n estimate = qres.x.cpu()\n sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)\n\n flashy.distrib.barrier()\n\n def load_from_pretrained(self, name: str) -> dict:\n model = models.CompressionModel.get_pretrained(name)\n if isinstance(model, models.DAC):\n raise RuntimeError(\"Cannot fine tune a DAC model.\")\n elif isinstance(model, models.HFEncodecCompressionModel):\n self.logger.warning('Trying to automatically convert a HuggingFace model '\n 'to AudioCraft, this might fail!')\n state = model.model.state_dict()\n new_state = {}\n for k, v in state.items():\n if k.startswith('decoder.layers') and '.conv.' in k and '.block.' 
not in k:\n # We need to determine if this a convtr or a regular conv.\n layer = int(k.split('.')[2])\n if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):\n\n k = k.replace('.conv.', '.convtr.')\n k = k.replace('encoder.layers.', 'encoder.model.')\n k = k.replace('decoder.layers.', 'decoder.model.')\n k = k.replace('conv.', 'conv.conv.')\n k = k.replace('convtr.', 'convtr.convtr.')\n k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')\n k = k.replace('.codebook.', '._codebook.')\n new_state[k] = v\n state = new_state\n elif isinstance(model, models.EncodecModel):\n state = model.state_dict()\n else:\n raise RuntimeError(f\"Cannot fine tune model type {type(model)}.\")\n return {\n 'best_state': {'model': state}\n }\n\n @staticmethod\n def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a CompressionModel from a given checkpoint path or dora sig.\n This method is a convenient endpoint to load a CompressionModel to use in other solvers.\n\n Args:\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n This also supports pre-trained models by using a path of the form //pretrained/NAME.\n See `model_from_pretrained` for a list of supported pretrained models.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n checkpoint_path = str(checkpoint_path)\n if checkpoint_path.startswith('//pretrained/'):\n name = checkpoint_path.split('/', 3)[-1]\n return models.CompressionModel.get_pretrained(name, device)\n logger = logging.getLogger(__name__)\n logger.info(f\"Loading compression model from checkpoint: {checkpoint_path}\")\n _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)\n assert _checkpoint_path is not None, f\"Could not resolve compression model checkpoint path: {checkpoint_path}\"\n state = checkpoint.load_checkpoint(_checkpoint_path)\n assert state is not None and 'xp.cfg' in state, f\"Could not load compression model from ckpt: {checkpoint_path}\"\n cfg = state['xp.cfg']\n cfg.device = device\n compression_model = models.builders.get_compression_model(cfg).to(device)\n assert compression_model.sample_rate == cfg.sample_rate, \"Compression model sample rate should match\"\n\n assert 'best_state' in state and state['best_state'] != {}\n assert 'exported' not in state, \"When loading an exported checkpoint, use the //pretrained/ prefix.\"\n compression_model.load_state_dict(state['best_state']['model'])\n compression_model.eval()\n logger.info(\"Compression model loaded!\")\n return compression_model\n\n @staticmethod\n def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,\n checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.\n\n Args:\n cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)\n compression_model = 
models.builders.get_wrapped_compression_model(compression_model, cfg)\n return compression_model" }, { "identifier": "load_compression_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n if 'pretrained' in pkg:\n return CompressionModel.get_pretrained(pkg['pretrained'], device=device)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n model = builders.get_compression_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n return model" }, { "identifier": "load_lm_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n if cfg.device == 'cpu':\n cfg.dtype = 'float32'\n else:\n cfg.dtype = 'float16'\n _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')\n _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')\n _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')\n _delete_param(cfg, 'conditioners.args.drop_desc_p')\n model = builders.get_lm_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n model.cfg = cfg\n return model" }, { "identifier": "audio_write", "path": "audiocraft/data/audio.py", "snippet": "def audio_write(stem_name: tp.Union[str, Path],\n wav: torch.Tensor, sample_rate: int,\n format: str = 'wav', mp3_rate: int = 320, ogg_rate: tp.Optional[int] = None,\n normalize: bool = True, strategy: str = 'peak', peak_clip_headroom_db: float = 1,\n rms_headroom_db: float = 18, loudness_headroom_db: float = 14,\n loudness_compressor: bool = False,\n log_clipping: bool = True, make_parent_dir: bool = True,\n add_suffix: bool = True) -> Path:\n \"\"\"Convenience function for saving audio to disk. Returns the filename the audio was written to.\n\n Args:\n stem_name (str or Path): Filename without extension which will be added automatically.\n wav (torch.Tensor): Audio data to save.\n sample_rate (int): Sample rate of audio data.\n format (str): Either \"wav\", \"mp3\", \"ogg\", or \"flac\".\n mp3_rate (int): kbps when using mp3s.\n ogg_rate (int): kbps when using ogg/vorbis. If not provided, let ffmpeg decide for itself.\n normalize (bool): if `True` (default), normalizes according to the prescribed\n strategy (see after). If `False`, the strategy is only used in case clipping\n would happen.\n strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',\n i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square\n with extra headroom to avoid clipping. 'clip' just clips.\n peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.\n rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. 
This must be much larger\n than the `peak_clip` one to avoid further clipping.\n loudness_headroom_db (float): Target loudness for loudness normalization.\n loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.\n when strategy is 'loudness' log_clipping (bool): If True, basic logging on stderr when clipping still\n occurs despite strategy (only for 'rms').\n make_parent_dir (bool): Make parent directory if it doesn't exist.\n Returns:\n Path: Path of the saved audio.\n \"\"\"\n assert wav.dtype.is_floating_point, \"wav is not floating point\"\n if wav.dim() == 1:\n wav = wav[None]\n elif wav.dim() > 2:\n raise ValueError(\"Input wav should be at most 2 dimension.\")\n assert wav.isfinite().all()\n wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,\n rms_headroom_db, loudness_headroom_db, loudness_compressor,\n log_clipping=log_clipping, sample_rate=sample_rate,\n stem_name=str(stem_name))\n if format == 'mp3':\n suffix = '.mp3'\n flags = ['-f', 'mp3', '-c:a', 'libmp3lame', '-b:a', f'{mp3_rate}k']\n elif format == 'wav':\n suffix = '.wav'\n flags = ['-f', 'wav', '-c:a', 'pcm_s16le']\n elif format == 'ogg':\n suffix = '.ogg'\n flags = ['-f', 'ogg', '-c:a', 'libvorbis']\n if ogg_rate is not None:\n flags += ['-b:a', f'{ogg_rate}k']\n elif format == 'flac':\n suffix = '.flac'\n flags = ['-f', 'flac']\n else:\n raise RuntimeError(f\"Invalid format {format}. Only wav or mp3 are supported.\")\n if not add_suffix:\n suffix = ''\n path = Path(str(stem_name) + suffix)\n if make_parent_dir:\n path.parent.mkdir(exist_ok=True, parents=True)\n try:\n _piping_to_ffmpeg(path, wav, sample_rate, flags)\n except Exception:\n if path.exists():\n # we do not want to leave half written files around.\n path.unlink()\n raise\n return path" }, { "identifier": "get_lm_model", "path": "audiocraft/models/builders.py", "snippet": "def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:\n \"\"\"Instantiate a transformer LM.\"\"\"\n if cfg.lm_model == 'transformer_lm':\n kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))\n n_q = kwargs['n_q']\n q_modeling = kwargs.pop('q_modeling', None)\n codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')\n attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))\n cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))\n cfg_prob, cfg_coef = cls_free_guidance['training_dropout'], cls_free_guidance['inference_coef']\n fuser = get_condition_fuser(cfg)\n condition_provider = get_conditioner_provider(kwargs[\"dim\"], cfg).to(cfg.device)\n if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically\n kwargs['cross_attention'] = True\n if codebooks_pattern_cfg.modeling is None:\n assert q_modeling is not None, \\\n \"LM model should either have a codebook pattern defined or transformer_lm.q_modeling\"\n codebooks_pattern_cfg = omegaconf.OmegaConf.create(\n {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}\n )\n pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)\n return LMModel(\n pattern_provider=pattern_provider,\n condition_provider=condition_provider,\n fuser=fuser,\n cfg_dropout=cfg_prob,\n cfg_coef=cfg_coef,\n attribute_dropout=attribute_dropout,\n dtype=getattr(torch, cfg.dtype),\n device=cfg.device,\n **kwargs\n ).to(cfg.device)\n else:\n raise KeyError(f\"Unexpected LM model {cfg.lm_model}\")" } ]
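The context snippets above describe the loader utilities (`load_compression_model`, `load_lm_model`) and the `audio_write` helper that this record's code builds on. Below is a minimal sketch of how they compose; the model id and cache directory are example placeholders rather than values taken from the record, and `MusicGen` is constructed the same way as in the record's `_load_model` method.

.. code-block:: python

    # Sketch only: composes the loader helpers and audio_write quoted in the
    # context list. MODEL_ID and CACHE_DIR are example placeholders.
    import torch
    from audiocraft.models import MusicGen
    from audiocraft.models.loaders import load_compression_model, load_lm_model
    from audiocraft.data.audio import audio_write

    MODEL_ID = "facebook/musicgen-small"   # example id; any id accepted by the loaders works
    CACHE_DIR = "/tmp/musicgen-cache"      # placeholder cache directory
    device = "cuda" if torch.cuda.is_available() else "cpu"

    compression_model = load_compression_model(MODEL_ID, device=device, cache_dir=CACHE_DIR)
    lm = load_lm_model(MODEL_ID, device=device, cache_dir=CACHE_DIR)
    model = MusicGen(MODEL_ID, compression_model, lm)

    model.set_generation_params(duration=8)          # seconds
    wav = model.generate(["lo-fi beat"])[0].cpu()    # (channels, samples)
    # 'loudness' mirrors the predictor's default normalization_strategy
    audio_write("output", wav, model.sample_rate, format="wav", strategy="loudness")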
import os import random import torchaudio import typing as tp import numpy as np import torch import librosa import subprocess import math import allin1 import pytsmod as tsm import shutil from typing import Optional from cog import BasePredictor, Input, Path from audiocraft.models import MusicGen, MultiBandDiffusion from audiocraft.solvers.compression import CompressionSolver from audiocraft.models.loaders import ( load_compression_model, load_lm_model, ) from audiocraft.data.audio import audio_write from audiocraft.models.builders import get_lm_model from omegaconf import OmegaConf from audiocraft.modules.btc.btc_model import BTC_model from audiocraft.modules.btc.utils.mir_eval_modules import idx2chord from demucs.audio import convert_audio from demucs.apply import apply_model
16,650
default=1.0, ), classifier_free_guidance: int = Input( description="Increases the influence of inputs on the output. Higher values produce lower-varience outputs that adhere more closely to inputs.", default=3, ), output_format: str = Input( description="Output format for generated audio.", default="wav", choices=["wav", "mp3"], ), seed: int = Input( description="Seed for random number generator. If `None` or `-1`, a random seed will be used.", default=None, ), # overlap: int = Input( # description="The length of overlapping part. Last `overlap` seconds of previous generation output audio is given to the next generation's audio prompt for continuation. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=5, le=15, ge=1 # ), # in_step_beat_sync: bool = Input( # description="If `True`, beat syncing is performed every generation step. In this case, audio prompting with EnCodec token will not be used, so that the audio quality might be degraded on and on along encoding-decoding sequences of the generation steps. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=False, # ), # amp_rate: float = Input( # description="Amplifying the output audio to prevent volume diminishing along generations. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=1.2, # ), ) -> Path: if prompt is None: raise ValueError("Must provide `prompt`.") if not music_input: raise ValueError("Must provide `music_input`.") if prompt is None: prompt = '' # tmp_path = 'tmp' # if os.path.isdir(tmp_path): # import shutil # shutil.rmtree(tmp_path) # os.mkdir(tmp_path) if os.path.isdir('demix'): shutil.rmtree('demix') if os.path.isdir('spec'): shutil.rmtree('spec') # Loading models if os.path.isfile(f'musicgen-{model_version}.th'): pass else: url = f"https://weights.replicate.delivery/default/musicgen-chord/musicgen-{model_version}.th" dest = f"/src/musicgen-{model_version}.th" subprocess.check_call(["pget", url, dest], close_fds=False) self.model = load_ckpt(f'/src/musicgen-{model_version}.th', self.device) self.model.lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True if 'stereo' in model_version: channel = 2 else: channel = 1 if large_chord_voca is False: # Switching Chord Prediction model to 25 vocab (smaller) self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.feature['large_voca']=False self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.model['num_chords']=25 self.model.lm.condition_provider.conditioners['self_wav'].chroma.model_file='audiocraft/modules/btc/test/btc_model.pt' self.model.lm.condition_provider.conditioners['self_wav'].chroma.idx_to_chord = idx2chord loaded = torch.load('audiocraft/modules/btc/test/btc_model.pt') self.model.lm.condition_provider.conditioners['self_wav'].chroma.mean = loaded['mean'] self.model.lm.condition_provider.conditioners['self_wav'].chroma.std = loaded['std'] self.model.lm.condition_provider.conditioners['self_wav'].chroma.model = BTC_model(config=self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.model).to(self.device) self.model.lm.condition_provider.conditioners['self_wav'].chroma.model.load_state_dict(loaded['model']) model = self.model model.lm.eval() if multi_band_diffusion and int(self.model.lm.cfg.transformer_lm.n_q) == 8: raise ValueError("Multi-band Diffusion only works with non-stereo models.") # in_step_beat_sync = in_step_beat_sync set_generation_params = lambda duration: 
model.set_generation_params( duration=duration, top_k=top_k, top_p=top_p, temperature=temperature, cfg_coef=classifier_free_guidance, ) model.lm.condition_provider.conditioners['self_wav'].chroma_coefficient = chroma_coefficient if not seed or seed == -1: seed = torch.seed() % 2 ** 32 - 1 set_all_seeds(seed) print(f"Using seed {seed}") # Music Structure Analysis music_input_analysis = allin1.analyze(music_input) music_input, sr = torchaudio.load(music_input) print("BPM : ", music_input_analysis.bpm) if not beat_sync_threshold or beat_sync_threshold == -1: if music_input_analysis.bpm is not None: beat_sync_threshold = 1.1/(int(music_input_analysis.bpm)/60) else: beat_sync_threshold = 0.75 if music_input_analysis.bpm is not None: prompt = prompt + f', bpm : {int(music_input_analysis.bpm)}' music_input = music_input[None] if music_input.dim() == 2 else music_input duration = music_input.shape[-1]/sr wav_sr = model.sample_rate vocal, background = self.separate_vocals(music_input, sr)
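The cropped code above derives a default `beat_sync_threshold` from the detected BPM as `1.1/(bpm/60)`, i.e. 110% of one beat period in seconds, and falls back to `0.75` when allin1 reports no BPM. A small restatement of that rule for sanity-checking the arithmetic; the helper name is illustrative and not part of the repository.

.. code-block:: python

    from typing import Optional

    def default_beat_sync_threshold(bpm: Optional[int]) -> float:
        """Mirror the fallback rule used in the code above (illustrative helper)."""
        if bpm is None:
            return 0.75                  # no BPM detected
        return 1.1 / (int(bpm) / 60)     # 1.1 beat periods, in seconds

    assert abs(default_beat_sync_threshold(120) - 0.55) < 1e-9  # 0.5 s beat -> 0.55 s threshold
    assert default_beat_sync_threshold(None) == 0.75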
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu" self.mbd = MultiBandDiffusion.get_mbd_musicgen() def _load_model( self, model_path: str, cls: Optional[any] = None, load_args: Optional[dict] = {}, model_id: Optional[str] = None, device: Optional[str] = None, ) -> MusicGen: if device is None: device = self.device compression_model = load_compression_model( model_id, device=device, cache_dir=model_path ) lm = load_lm_model(model_id, device=device, cache_dir=model_path) return MusicGen(model_id, compression_model, lm) def predict( self, model_version: str = Input( description="Model type. Computations take longer when using `large` or `stereo` models.", default="stereo-chord", choices=["stereo-chord", "stereo-chord-large", "chord", "chord-large"] ), prompt: str = Input( description="A description of the music you want to generate.", default=None ), music_input: Path = Input( description="An audio file input for the remix.", default=None, ), multi_band_diffusion: bool = Input( description="If `True`, the EnCodec tokens will be decoded with MultiBand Diffusion. Not compatible with `stereo` models.", default=False, ), normalization_strategy: str = Input( description="Strategy for normalizing audio.", default="loudness", choices=["loudness", "clip", "peak", "rms"], ), # bpm_hard_sync: bool = Input( # description="If `True`, respective downbeats aren't analyzed, but are calculated from the bpm value detected and the first downbeat recognized instead. If the input audio has a changing bpm value, must be set `False`.", # default=True, # ), beat_sync_threshold: float = Input( description="When beat syncing, if the gap between generated downbeat timing and input audio downbeat timing is larger than `beat_sync_threshold`, consider the beats are not corresponding. If `None` or `-1`, `1.1/(bpm/60)` will be used as the value. 0.75 is a good value to set.", default=None, ), large_chord_voca: bool = Input( description="If `True`, more chords like 7th, diminished and etc are used. 
If `False` only 12 major and 12 minor chords are used.", default=True ), chroma_coefficient: float = Input( description="Coefficient value multiplied to multi-hot chord chroma.", default=1.0, ge=0.5, le=2.0 ), top_k: int = Input( description="Reduces sampling to the k most likely tokens.", default=250 ), top_p: float = Input( description="Reduces sampling to tokens with cumulative probability of p. When set to `0` (default), top_k sampling is used.", default=0.0, ), temperature: float = Input( description="Controls the 'conservativeness' of the sampling process. Higher temperature means more diversity.", default=1.0, ), classifier_free_guidance: int = Input( description="Increases the influence of inputs on the output. Higher values produce lower-varience outputs that adhere more closely to inputs.", default=3, ), output_format: str = Input( description="Output format for generated audio.", default="wav", choices=["wav", "mp3"], ), seed: int = Input( description="Seed for random number generator. If `None` or `-1`, a random seed will be used.", default=None, ), # overlap: int = Input( # description="The length of overlapping part. Last `overlap` seconds of previous generation output audio is given to the next generation's audio prompt for continuation. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=5, le=15, ge=1 # ), # in_step_beat_sync: bool = Input( # description="If `True`, beat syncing is performed every generation step. In this case, audio prompting with EnCodec token will not be used, so that the audio quality might be degraded on and on along encoding-decoding sequences of the generation steps. (This will be fixed with the optimal value and be hidden, when releasing.)", # default=False, # ), # amp_rate: float = Input( # description="Amplifying the output audio to prevent volume diminishing along generations. 
(This will be fixed with the optimal value and be hidden, when releasing.)", # default=1.2, # ), ) -> Path: if prompt is None: raise ValueError("Must provide `prompt`.") if not music_input: raise ValueError("Must provide `music_input`.") if prompt is None: prompt = '' # tmp_path = 'tmp' # if os.path.isdir(tmp_path): # import shutil # shutil.rmtree(tmp_path) # os.mkdir(tmp_path) if os.path.isdir('demix'): shutil.rmtree('demix') if os.path.isdir('spec'): shutil.rmtree('spec') # Loading models if os.path.isfile(f'musicgen-{model_version}.th'): pass else: url = f"https://weights.replicate.delivery/default/musicgen-chord/musicgen-{model_version}.th" dest = f"/src/musicgen-{model_version}.th" subprocess.check_call(["pget", url, dest], close_fds=False) self.model = load_ckpt(f'/src/musicgen-{model_version}.th', self.device) self.model.lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True if 'stereo' in model_version: channel = 2 else: channel = 1 if large_chord_voca is False: # Switching Chord Prediction model to 25 vocab (smaller) self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.feature['large_voca']=False self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.model['num_chords']=25 self.model.lm.condition_provider.conditioners['self_wav'].chroma.model_file='audiocraft/modules/btc/test/btc_model.pt' self.model.lm.condition_provider.conditioners['self_wav'].chroma.idx_to_chord = idx2chord loaded = torch.load('audiocraft/modules/btc/test/btc_model.pt') self.model.lm.condition_provider.conditioners['self_wav'].chroma.mean = loaded['mean'] self.model.lm.condition_provider.conditioners['self_wav'].chroma.std = loaded['std'] self.model.lm.condition_provider.conditioners['self_wav'].chroma.model = BTC_model(config=self.model.lm.condition_provider.conditioners['self_wav'].chroma.config.model).to(self.device) self.model.lm.condition_provider.conditioners['self_wav'].chroma.model.load_state_dict(loaded['model']) model = self.model model.lm.eval() if multi_band_diffusion and int(self.model.lm.cfg.transformer_lm.n_q) == 8: raise ValueError("Multi-band Diffusion only works with non-stereo models.") # in_step_beat_sync = in_step_beat_sync set_generation_params = lambda duration: model.set_generation_params( duration=duration, top_k=top_k, top_p=top_p, temperature=temperature, cfg_coef=classifier_free_guidance, ) model.lm.condition_provider.conditioners['self_wav'].chroma_coefficient = chroma_coefficient if not seed or seed == -1: seed = torch.seed() % 2 ** 32 - 1 set_all_seeds(seed) print(f"Using seed {seed}") # Music Structure Analysis music_input_analysis = allin1.analyze(music_input) music_input, sr = torchaudio.load(music_input) print("BPM : ", music_input_analysis.bpm) if not beat_sync_threshold or beat_sync_threshold == -1: if music_input_analysis.bpm is not None: beat_sync_threshold = 1.1/(int(music_input_analysis.bpm)/60) else: beat_sync_threshold = 0.75 if music_input_analysis.bpm is not None: prompt = prompt + f', bpm : {int(music_input_analysis.bpm)}' music_input = music_input[None] if music_input.dim() == 2 else music_input duration = music_input.shape[-1]/sr wav_sr = model.sample_rate vocal, background = self.separate_vocals(music_input, sr)
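The full source above defines `_delete_param` to strip stale conditioner cache paths from a checkpoint's stored config before the LM is rebuilt. A minimal usage sketch, assuming that definition is in scope; the config contents here are invented purely to exercise the helper.

.. code-block:: python

    from omegaconf import OmegaConf

    cfg = OmegaConf.create(
        {"conditioners": {"self_wav": {"chroma_stem": {"cache_path": "/old/cache"}}}}
    )
    OmegaConf.set_struct(cfg, True)  # struct mode, as in a loaded checkpoint config

    # Remove the nested key in place; a missing intermediate key makes the call a no-op.
    _delete_param(cfg, "conditioners.self_wav.chroma_stem.cache_path")
    assert "cache_path" not in cfg.conditioners.self_wav.chroma_stem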
audio_write(
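The target line above opens an `audio_write` call. Purely as a hypothetical illustration of how such a call could continue, using the `audio_write` signature quoted in the context list and the `wav_sr` / `normalization_strategy` names from the code above; the real arguments are not shown in this record.

.. code-block:: python

    # Hypothetical continuation only -- argument names follow the audio_write
    # signature in the context list; none of this is taken from the repository.
    audio_write(
        "background",                     # stem name without extension (placeholder)
        background[0].cpu(),              # assumed (channels, samples) float tensor
        wav_sr,                           # model sample rate from the code above
        strategy=normalization_strategy,  # e.g. 'loudness'
    )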
5
2023-10-09 09:55:24+00:00
24k
oracle/guardian-ai
tests/unitary/test_fairness_metrics.py
[ { "identifier": "ConsistencyScorer", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "class ConsistencyScorer(_SimpleDatasetFairnessScorer):\n \"\"\"\n Measures the consistency of a dataset.\n\n Consistency is measured as the number of ratio of instances that have a\n different label from the k=5 nearest neighbors.\n\n Perfect score\n A perfect score for this metric is 0, meaning that the dataset does\n not have different labels for instances that are similar to one another.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import ConsistencyScorer\n scorer = ConsistencyScorer(['race', 'sex'])\n scorer(X=X, y_true=y_true)\n scorer(None, X, y_true)\n \"\"\"\n\n def __init__(self, protected_attributes: Union[pd.Series, np.ndarray, List, str]):\n super().__init__(protected_attributes=protected_attributes, metric=consistency)" }, { "identifier": "DatasetStatisticalParityScorer", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "class DatasetStatisticalParityScorer(_DatasetFairnessScorer):\n \"\"\"\n Measures the statistical parity [1] of a dataset. Statistical parity (also\n known as Base Rate or Disparate Impact) for a dataset states that a dataset\n is unbiased if the label is independent of the protected attribute.\n\n For each subgroup, statistical parity is computed as the ratio of positive\n labels in a subgroup.\n\n Statistical Parity (also known as Base Rate or Disparate Impact) is\n calculated as PL / N, where PL and N are the number of Positive Labels and\n total number of instances, respectively.\n\n Perfect score\n A perfect score for this metric means that the dataset does not have\n a different ratio of positive labels for a subgroup than it does for\n the rest of the subgroups. For example, if the protected attributes\n are race and sex, then a perfect statistical parity would mean that\n all combinations of values for race and sex have identical ratios of\n positive labels. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n\n References\n ----------\n [1] `Cynthia Dwork et al. \"Fairness Through Awareness\". Innovations in\n Theoretical Computer Science. 2012. <https://arxiv.org/abs/1104.3913>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import DatasetStatisticalParityScorer\n scorer = DatasetStatisticalParityScorer(['race', 'sex'])\n scorer(X=X, y_true=y_true)\n scorer(None, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=dataset_statistical_parity,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "SmoothedEDFScorer", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "class SmoothedEDFScorer(_SimpleDatasetFairnessScorer):\n \"\"\"\n Measures the smoothed Empirical Differential Fairness (EDF) of a dataset, as\n proposed by Foulds et al. [1].\n\n Smoothed EDF returns the minimal exponential deviation of positive target\n ratios comparing a subgroup to the rest of the subgroups.\n\n This metric is related to :class:`.DatasetStatisticalParity` with\n `reduction='max'` and `distance_measure='ratio'`, with the only difference\n being that :class:`.SmoothedEDFScorer` returns a logarithmic value instead.\n\n Perfect score\n A perfect score for this metric is 0, meaning that the dataset does\n not have a different ratio of positive labels for a subgroup than\n it does for the rest of the subgroups. For example, if the\n protected attributes are race and sex, then a perfect smoothed EDF\n would mean that all combinations of values for race and sex have\n identical ratios of positive labels.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n\n References\n ----------\n [1] `Foulds, James R., et al. \"An intersectional definition of fairness.\"\n 2020 IEEE 36th International Conference on Data Engineering (ICDE).\n IEEE, 2020. <https://arxiv.org/abs/1807.08362>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import SmoothedEDFScorer\n scorer = SmoothedEDFScorer(['race', 'sex'])\n scorer(X=X, y_true=y_true)\n scorer(None, X, y_true)\n \"\"\"\n\n def __init__(self, protected_attributes: Union[pd.Series, np.ndarray, List, str]):\n super().__init__(protected_attributes=protected_attributes, metric=smoothed_edf)" }, { "identifier": "consistency", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "def consistency(y_true: Union[pd.Series, np.ndarray, List], subgroups: pd.DataFrame):\n \"\"\"\n Measures the consistency of a dataset.\n\n For more details, refer to :class:`.ConsistencyScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import consistency\n subgroups = X[['race', 'sex']]\n consistency(y_true, subgroups)\n \"\"\"\n # Need to read with [0] because consistency returns an array of size 1.\n return _simple_dataset_metric(y_true, subgroups, metric=\"consistency\")[0]" }, { "identifier": "dataset_statistical_parity", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "def dataset_statistical_parity(\n y_true: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: str = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the statistical parity of a dataset.\n\n For more details, refer to :class:`.DatasetStatisticalParityScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import dataset_statistical_parity\n subgroups = X[['race', 'sex']]\n dataset_statistical_parity(y_true, subgroups)\n \"\"\"\n return _dataset_metric(\n y_true,\n subgroups,\n metric=\"base_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "smoothed_edf", "path": "guardian_ai/fairness/metrics/dataset.py", "snippet": "def smoothed_edf(y_true: Union[pd.Series, np.ndarray, List], subgroups: pd.DataFrame):\n \"\"\"\n Measures the smoothed Empirical Differential Fairness (EDF) of a dataset, as\n proposed by Foulds et al. [1].\n\n For more details, refer to :class:`.SmoothedEDFScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n\n References\n ----------\n [1] `Foulds, James R., et al. \"An intersectional definition of fairness.\"\n 2020 IEEE 36th International Conference on Data Engineering (ICDE).\n IEEE, 2020. <https://arxiv.org/abs/1807.08362>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import smoothed_edf\n subgroups = X[['race', 'sex']]\n smoothed_edf(y_true, subgroups)\n \"\"\"\n return _simple_dataset_metric(\n y_true, subgroups, metric=\"smoothed_empirical_differential_fairness\"\n )" }, { "identifier": "EqualizedOddsScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class EqualizedOddsScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's true positive and false positive rates\n between subgroups and the rest of the subgroups.\n\n The disparity is measured by comparing the true positive and false positive\n rates on instances of a subgroup against the rest of the subgroups.\n\n True Positive Rate (also known as TPR, recall, or sensitivity) is\n calculated as TP / (TP + FN), where TP and FN are the number of true\n positives and false negatives, respectively.\n\n False Positive Rate (also known as FPR or fall-out) is calculated as\n FP / (FP + TN), where FP and TN are the number of false positives and\n true negatives, respectively.\n\n Equalized Odds [1] is computed by taking the maximum distance between\n TPR and FPR for a subgroup against the rest of the subgroups.\n\n Perfect score\n A perfect score for this metric means that the model has the same TPR and\n FPR when comparing a subgroup to the rest of the subgroups. For example,\n if the protected attributes are race and sex, then a perfect\n Equalized Odds disparity would mean that all combinations of values for\n race and sex have identical TPR and FPR. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Moritz Hardt et al. \"Equality of Opportunity in Supervised Learning\".\n Advances in Neural Information Processing Systems. 2016.\n <https://arxiv.org/pdf/1610.02413.pdf>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import EqualizedOddsScorer\n scorer = EqualizedOddsScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=equalized_odds,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "ErrorRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class ErrorRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's error rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the error rate on\n instances of a subgroup against the rest of the subgroups.\n\n Error Rate (also known as inaccuracy) is calculated as\n (FP + FN) / N, where FP and FN are the number of false positives and\n false negatives, respectively, while N is the total Number of\n instances.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect error rate disparity would\n mean that all combinations of values for race and sex have identical\n error rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import ErrorRateScorer\n scorer = ErrorRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=error_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseDiscoveryRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseDiscoveryRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false discovery rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n discovery rate on instances of a subgroup against the rest of the\n subgroups.\n\n False Discovery Rate (also known as FDR) is calculated as\n FP / (FP + TP), where FP and TP are the number of false positives and\n true positives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes on the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false discovery rate disparity\n would mean that all combinations of values for race and sex have identical\n false discovery rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseDiscoveryRateScorer\n scorer = FalseDiscoveryRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_discovery_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseNegativeRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseNegativeRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false negative rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n negative rate on instances of a subgroup against the rest of the subgroups.\n\n False Negative Rate [1] (also known as FNR or miss rate) is calculated as\n FN / (FN + TP), where FN and TP are the number of false negatives and\n true positives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not incorrectly\n predict the negative class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false negative rate disparity\n would mean that all combinations of values for race and sex have identical\n false negative rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Alexandra Chouldechova. \"Fair Prediction with Disparate Impact: A Study\n of Bias in Recidivism Prediction Instruments\". Big Data (2016).\n <https://www.liebertpub.com/doi/10.1089/big.2016.0047>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseNegativeRateScorer\n scorer = FalseNegativeRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_negative_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalseOmissionRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalseOmissionRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false omission rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n omission rate on instances of a subgroup against the rest of the subgroups.\n\n False Omission Rate (also known as FOR) is calculated as\n FN / (FN + TN), where FN and TN are the number of false negatives and\n true negatives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not make more\n mistakes on the negative class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false omission rate disparity\n would mean that all combinations of values for race and sex have identical\n false omission rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalseOmissionRateScorer\n scorer = FalseOmissionRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_omission_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "FalsePositiveRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class FalsePositiveRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's false positive rate between all subgroup pairs.\n\n For each subgroup, the disparity is measured by comparing the false\n positive rate on instances of a subgroup against the rest of the subgroups.\n\n False Positive Rate [1] (also known as FPR or fall-out) is calculated as\n FP / (FP + TN), where FP and TN are the number of false positives and\n true negatives, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not incorrectly\n predict the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect false positive rate disparity\n would mean that all combinations of values for race and sex have identical\n false positive rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Alexandra Chouldechova. \"Fair Prediction with Disparate Impact: A Study\n of Bias in Recidivism Prediction Instruments\". Big Data (2016).\n <https://www.liebertpub.com/doi/10.1089/big.2016.0047>`_\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import FalsePositiveRateScorer\n scorer = FalsePositiveRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=false_positive_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "ModelStatisticalParityScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class ModelStatisticalParityScorer(_ModelFairnessScorer): # noqa: D412\n \"\"\"\n Measure the statistical parity [1] of a model's output between all subgroup pairs.\n\n Statistical parity (also known as Base Rate or Disparate Impact) states that\n a predictor is unbiased if the prediction is independent of the protected\n attribute.\n\n Statistical Parity is calculated as PP / N, where PP and N are the number of\n Positive Predictions and total Number of predictions made, respectively.\n\n Perfect score\n A perfect score for this metric means that the model does not predict\n positively any of the subgroups at a different rate than it does for the\n rest of the subgroups. For example, if the protected attributes are race\n and sex, then a perfect statistical parity would mean that all combinations\n of values for race and sex have identical ratios of positive predictions.\n Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n\n References\n ----------\n [1] `Cynthia Dwork et al. \"Fairness Through Awareness\". Innovations in\n Theoretical Computer Science. 2012. <https://arxiv.org/abs/1104.3913>`_\n\n Examples\n --------\n\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import ModelStatisticalParityScorer\n\n scorer = ModelStatisticalParityScorer(['race', 'sex'])\n scorer(model, X, y_true)\n\n This metric does not require `y_true`. It can also be called using\n\n .. 
code-block:: python\n\n scorer(model, X)\n \"\"\" # noqa: D412\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=model_statistical_parity,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )\n\n def __call__(\n self,\n model: Any,\n X: pd.DataFrame,\n y_true: Optional[Union[pd.Series, np.ndarray, List]] = None,\n supplementary_features: Optional[pd.DataFrame] = None,\n ):\n \"\"\"\n Compute the metric using a model's predictions on a given array\n of instances ``X``.\n\n Parameters\n ----------\n model: Any\n Object that implements a `predict(X)` function to collect\n categorical predictions.\n X : pandas.DataFrame\n Array of instances to compute the metric on.\n y_true : pandas.Series, numpy.ndarray, list, or None, default=None\n Array of groundtruth labels.\n supplementary_features : pandas.DataFrame, or None, default=None\n Array of supplementary features for each instance. Used in case\n one attribute in ``self.protected_attributes`` is not contained by\n ``X`` (e.g. if the protected attribute is not used by the model).\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to ``self.reduction``.\n\n\n Raises\n ------\n GuardianAIValueError\n - if a feature is present in both ``X``\n and ``supplementary_features``.\n\n \"\"\"\n y_pred = model.predict(X)\n\n subgroups = self._get_check_subgroups(X, supplementary_features)\n\n return self.metric(\n y_true, y_pred, subgroups, self.distance_measure, self.reduction\n )" }, { "identifier": "TheilIndexScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class TheilIndexScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's predictions according to groundtruth\n labels, as proposed by Speicher et al. [1].\n\n Intuitively, the Theil Index can be thought of as a measure of the\n divergence between a subgroup's different error distributions (i.e. false\n positives and false negatives) against the rest of the subgroups.\n\n Perfect score\n The perfect score for this metric is 0, meaning that the model does not\n have a different error distribution for any subgroup when compared to the\n rest of the subgroups. For example, if the protected attributes are\n race and sex, then a perfect Theil Index disparity would mean that all\n combinations of values for race and sex have identical error\n distributions.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str or None, default=None\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Speicher, Till, et al. \"A unified approach to quantifying algorithmic\n unfairness: Measuring individual & group unfairness via inequality indices.\"\n Proceedings of the 24th ACM SIGKDD international conference on knowledge\n discovery & data mining. 2018. <https://arxiv.org/abs/1807.00787>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import TheilIndexScorer\n scorer = TheilIndexScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: Optional[str] = None,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=theil_index,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=True,\n )" }, { "identifier": "TruePositiveRateScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class TruePositiveRateScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's true positive rate between\n all subgroup pairs (also known as equal opportunity).\n\n For each subgroup, the disparity is measured by comparing the true positive\n rate on instances of a subgroup against the rest of the subgroups.\n\n True Positive Rate [1] (also known as TPR, recall, or sensitivity) is\n calculated as TP / (TP + FN), where TP and FN are the number of true\n positives and false negatives, respectively.\n\n\n Perfect score\n A perfect score for this metric means that the model does not correctly\n predict the positive class for any of the subgroups more often than it\n does for the rest of the subgroups. For example, if the protected\n attributes are race and sex, then a perfect true positive rate disparity\n would mean that all combinations of values for race and sex have\n identical true positive rates. Perfect values are:\n\n - 1 if using ``'ratio'`` as ``distance_measure``.\n - 0 if using ``'diff'`` as ``distance_measure``.\n\n Parameters\n ----------\n protected_attributes: pandas.Series, numpy.ndarray, list, str\n Array of attributes or single attribute that should be treated as\n protected. If an attribute is protected, then all of its unique\n values are considered as subgroups.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n References\n ----------\n [1] `Moritz Hardt et al. 
\"Equality of Opportunity in Supervised Learning\".\n Advances in Neural Information Processing Systems. 2016.\n <https://arxiv.org/pdf/1610.02413.pdf>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import TruePositiveRateScorer\n scorer = TruePositiveRateScorer(['race', 'sex'])\n scorer(model, X, y_true)\n \"\"\"\n\n def __init__(\n self,\n protected_attributes: Union[pd.Series, np.ndarray, List, str],\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n ):\n super().__init__(\n protected_attributes=protected_attributes,\n metric=true_positive_rate,\n distance_measure=distance_measure,\n reduction=reduction,\n allow_distance_measure_none=False,\n )" }, { "identifier": "equalized_odds", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def equalized_odds(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's true positive and false positive rates\n between subgroups and the rest of the subgroups.\n\n For more details, refer to :class:`.EqualizedOddsScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import equalized_odds\n subgroups = X[['race', 'sex']]\n equalized_odds(y_true, y_pred, subgroups)\n \"\"\"\n tpr = true_positive_rate(\n y_true,\n y_pred,\n subgroups,\n distance_measure=distance_measure,\n reduction=reduction,\n )\n\n fpr = false_positive_rate(\n y_true,\n y_pred,\n subgroups,\n distance_measure=distance_measure,\n reduction=reduction,\n )\n if isinstance(tpr, dict):\n eq_odds = {}\n for key in tpr:\n eq_odds[key] = np.nanmax([tpr[key], fpr[key]])\n else:\n eq_odds = np.nanmax([tpr, fpr])\n\n return eq_odds" }, { "identifier": "error_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def error_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's error rate between all subgroup pairs.\n\n For more details, refer to :class:`.ErrorRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import error_rate\n subgroups = X[['race', 'sex']]\n error_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"error_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_discovery_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_discovery_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false discovery rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseDiscoveryRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_discovery_rate\n subgroups = X[['race', 'sex']]\n false_discovery_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_discovery_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_negative_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_negative_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false negative rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseNegativeRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. 
code-block:: python\n\n from guardian_ai.fairness.metrics import false_negative_rate\n subgroups = X[['race', 'sex']]\n false_negative_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_negative_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_omission_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_omission_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false omission rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalseOmissionRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_omission_rate\n subgroups = X[['race', 'sex']]\n false_omission_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_omission_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "false_positive_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def false_positive_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's false positive rate between all subgroup pairs.\n\n For more details, refer to :class:`.FalsePositiveRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. 
Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import false_positive_rate\n subgroups = X[['race', 'sex']]\n false_positive_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"false_positive_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "model_statistical_parity", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def model_statistical_parity(\n y_true: Optional[Union[pd.Series, np.ndarray, List]] = None,\n y_pred: Optional[Union[pd.Series, np.ndarray, List]] = None,\n subgroups: Optional[pd.DataFrame] = None,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measure the statistical parity of a model's output between all subgroup pairs.\n\n For more details, refer to :class:`.ModelStatisticalParityScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list or None, default=None\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list or None, default=None\n Array of model predictions.\n subgroups : pandas.DataFrame or None, default=None\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n Raises\n ------\n GuardianAIValueError\n If Value of None is received for either `y_pred` or `subgroups`.\n\n Examples\n --------\n\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import model_statistical_parity\n subgroups = X[['race', 'sex']]\n model_statistical_parity(y_true, y_pred, subgroups)\n\n This metric does not require `y_true`. It can also be called using\n\n .. code-block:: python\n\n model_statistical_parity(None, y_pred, subgroups)\n model_statistical_parity(y_pred=y_pred, subgroups=subgroups)\n \"\"\" # noqa: D412\n\n if y_pred is None or subgroups is None:\n raise GuardianAIValueError(\n \"Value of None was received for either `y_pred` or `subgroups`. \"\n \"This may be due to calling the metric using only 2 positional \"\n \"arguments. 
If this is the case, either call the function by \"\n \"passing ``None`` as the first argument or use named arguments for \"\n \"`y_pred` and `subgroups`.\"\n )\n\n return _model_metric(\n None,\n y_pred,\n subgroups,\n metric=\"selection_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=True,\n allow_distance_measure_none=False,\n )" }, { "identifier": "theil_index", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def theil_index(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: Optional[str] = None,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's predictions according to groundtruth\n labels, as proposed by Speicher et al. [1].\n\n For more details, refer to :class:`.TheilIndexScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str or None, default=None\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n Raises\n ------\n GuardianAIValueError\n If distance_measure values are given to Theil Index.\n\n References\n ----------\n [1]: `Speicher, Till, et al. \"A unified approach to quantifying algorithmic\n unfairness: Measuring individual & group unfairness via inequality indices.\"\n Proceedings of the 24th ACM SIGKDD international conference on knowledge\n discovery & data mining. 2018. <https://arxiv.org/abs/1807.00787>`_\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import theil_index\n subgroups = X[['race', 'sex']]\n theil_index(y_true, y_pred, subgroups)\n \"\"\"\n\n if distance_measure is not None and not isinstance(\n distance_measure, _DistanceMetric\n ):\n raise GuardianAIValueError(\n \"Theil Index does not accept distance_measure values. 
It should\"\n \"always be set to ``None``.\"\n )\n\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"between_group_theil_index\",\n distance_measure=None,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=True,\n )" }, { "identifier": "true_positive_rate", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def true_positive_rate(\n y_true: Union[pd.Series, np.ndarray, List],\n y_pred: Union[pd.Series, np.ndarray, List],\n subgroups: pd.DataFrame,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measures the disparity of a model's true positive rate between all subgroup pairs.\n\n For more details, refer to :class:`.TruePositiveRateScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list\n Array of model predictions.\n subgroups : pandas.DataFrame\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n\n Examples\n --------\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import true_positive_rate\n subgroups = X[['race', 'sex']]\n true_positive_rate(y_true, y_pred, subgroups)\n \"\"\"\n return _model_metric(\n y_true,\n y_pred,\n subgroups,\n metric=\"true_positive_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=False,\n allow_distance_measure_none=False,\n )" }, { "identifier": "GuardianAITypeError", "path": "guardian_ai/utils/exception.py", "snippet": "class GuardianAITypeError(TypeError, GuardianAIError):\n \"\"\"Exception raised for generic type issues.\"\"\"\n\n pass" }, { "identifier": "GuardianAIValueError", "path": "guardian_ai/utils/exception.py", "snippet": "class GuardianAIValueError(ValueError, GuardianAIError):\n \"\"\"Exception raised for unexpected values.\"\"\"\n\n pass" }, { "identifier": "get_dummy_dataset", "path": "tests/utils.py", "snippet": "def get_dummy_dataset(\n n_samples=5000,\n n_features=10,\n n_classes=2,\n types=[str, float, bool, int],\n content=[],\n contain_null=False,\n null_ratio=0.3,\n dtime_types=[],\n tz_aware=False,\n reg_range=10.0,\n cat_range=30,\n random_seed=9999,\n imb_factor=1.0,\n task=\"classification\",\n **kwargs,\n):\n \"\"\"\n Generates a dummy dataset and returns its corresponding ope/oml\n dataframe:\n dataset shape n_samples x n_features.\n\n types: column types you wish to generate (random number of columns=\n n_features types are generated, with at least one of each type).\n\n content: list of tuples (dtype, feature) specifying bad column\n features. 
Features can be 'const' - to make all values in column\n constant, or value between 0 and 1 which indicates percentage of\n missing values in a column\n\n dtime_types: datetime column types to generate. Acceptable types\n are: ['datetime', 'date', 'time', 'timedelta', 'datetimetz']\n\n n_classes: number of target classes (only used for classification)\n\n reg_range: range of target for regression datasets, not used for\n classification\n\n cat_range: maximum number of unique values for the categorical\n features\n\n imb_factor: ~ class_ratio = minority_class_size/majority_class_size\n approximately controls dataset target imbalance\n (only used for classification).\n\n \"\"\"\n np.random.seed(random_seed)\n allowed_dtime_types = [\n \"datetime\",\n \"date\",\n \"time\",\n \"timedelta\",\n \"datetimez\",\n \"Timestamp\",\n ]\n\n # sanity checks\n assert (\n n_samples >= n_classes\n ), \"Number of samples has to be greater than num of classes\"\n assert (imb_factor > 0) and (\n imb_factor <= 1.0\n ), \"imb_factor has to be in range of (0, 1.0]\"\n assert len(types) == len(set(types)), \"types inside the list must be unique\"\n assert len(dtime_types) == len(\n set(dtime_types)\n ), \"dtime_types inside the list must be unique\"\n assert (\n len(dtime_types) + len(types) <= n_features\n ), \"provided number of feature types is more than n_features\"\n assert task in [\n \"classification\",\n \"regression\",\n \"anomaly_detection\",\n ], \"Task must be one of classification or regression\"\n assert all(\n x for x in dtime_types if x in allowed_dtime_types\n ), \"dtime_types: {} outside of allowed: {}\".format(dtime_types, allowed_dtime_types)\n\n extra_types, extra_feats, extra_cols = [], [], 0\n if content != []:\n extra_cols = len(content)\n extra_types = [x for x, _ in content]\n extra_feats = [x for _, x in content]\n\n # target labels for the dataset\n if task == \"classification\" or task == \"anomaly_detection\":\n # assign class counts based on geometric distribution of classes based on imb_factor\n class_weights = np.geomspace(imb_factor, 1.0, num=n_classes)\n class_counts = [\n max(1, int(n_samples * x / np.sum(class_weights))) for x in class_weights\n ]\n class_excess = np.sum(class_counts) - n_samples\n class_counts[-1] -= class_excess\n\n # create labels based on class counts and shuffle them\n y = np.hstack(\n [np.full((1, count), cl) for cl, count in enumerate(class_counts)]\n ).ravel()\n np.random.shuffle(y.astype(int))\n y = y.tolist()\n elif task == \"regression\":\n # noise between (-reg_range/2, reg_range/2) for regression\n y = reg_range * np.random.random(size=(1, n_samples, 1)) + reg_range / 2.0\n y = y.reshape(1, n_samples).ravel().tolist()\n\n # tally total number of features\n all_feat_types = types + dtime_types + extra_types\n total_feat_types = len(types) + len(dtime_types)\n if total_feat_types > 0:\n feat_col_types = np.random.choice(\n range(0, total_feat_types), size=n_features - total_feat_types\n ).tolist()\n feat_col_types += list(\n range(0, total_feat_types)\n ) # to ensure at least one of each type\n\n else:\n feat_col_types = []\n feat_col_types += list(range(total_feat_types, total_feat_types + len(extra_types)))\n features = []\n col_types = []\n tz = {}\n # extra_features provided in content, and certain datetime columns are handled differently\n # they get added as pandas Series or DataFrames to rest of features in the end\n special_cols_num, special_pd_df = [], []\n extra_features = pd.DataFrame()\n for i, t in enumerate(feat_col_types):\n assert t 
< total_feat_types + len(extra_types)\n typ = all_feat_types[t]\n if typ is str:\n high_val = np.random.randint(3, cat_range)\n feat = np.random.randint(0, high_val, size=n_samples).tolist()\n feat = [\"STR{}\".format(val) for val in feat]\n elif typ is int:\n low_val = np.random.randint(-50000, -10)\n high_val = np.random.randint(10, 50000)\n feat = np.random.randint(low_val, high_val, size=n_samples).tolist()\n elif typ is float:\n feat = np.random.rand(n_samples).tolist()\n elif typ is bool:\n feat = np.random.randint(0, 2, size=n_samples).tolist()\n feat = [bool(val) for val in feat]\n elif typ in allowed_dtime_types:\n if typ == \"datetime\":\n # generating random datetime\n deltas = random.sample(range(1, 172800000), n_samples)\n d1 = datetime.datetime.now() - datetime.timedelta(days=2000)\n d2 = datetime.datetime.now()\n generated_datetime = []\n for d in deltas:\n generated_datetime.append(d1 + datetime.timedelta(seconds=d))\n feat = generated_datetime\n elif typ == \"timedelta\":\n feat = n_samples * [datetime.timedelta()]\n elif typ == \"time\":\n feat = n_samples * [datetime.time()]\n elif typ == \"date\":\n feat = n_samples * [datetime.date(2019, 9, 11)]\n elif typ == \"datetimez\":\n special_cols_num.append(i)\n special_pd_df.append(\n pd.date_range(start=0, periods=n_samples, tz=\"UTC\")\n )\n feat = n_samples * [\n datetime.date(2019, 9, 11)\n ] # needs to be handled in special way b/c it's already pandas obj\n else:\n raise Exception(\"Unrecognized datetime type of column\")\n else:\n raise Exception(\"Unrecognized type of column\")\n\n # If index reached the last extra_col number of feature types, start modifying features\n # and adding them to extra_features DataFrame instead of list of features\n if extra_cols > 0 and i >= (len(feat_col_types) - extra_cols):\n feat_idx = i - (len(feat_col_types) - extra_cols)\n if isinstance(extra_feats[feat_idx], numbers.Number):\n # missing values given by extra_feats[feat_idx] percentage of instances\n assert (\n extra_feats[feat_idx] <= 1.0 and extra_feats[feat_idx] >= 0\n ), \"feature in content has to be ratio between 0 and 1\"\n ids = np.random.choice(\n range(0, n_samples), size=int(extra_feats[feat_idx] * n_samples)\n ).astype(int)\n dtype = map_col_types([extra_types[feat_idx].__name__])[0]\n feat = pd.Series(data=np.array(feat), dtype=dtype)\n feat[ids] = np.nan\n elif extra_feats[feat_idx] == \"const\":\n # constant column, set all rows to be same as the first instance\n dtype = map_col_types([extra_types[feat_idx].__name__])[0]\n feat = pd.Series(data=np.array(feat), dtype=dtype)\n feat = feat[0]\n extra_features[i] = feat\n else: # add features to the list\n features.append(feat)\n col_types.append(type(feat[0]).__name__)\n\n # if task == 'regression':\n # # Add scaled target column for regression so that score is positive\n # features.append([-0.5*x for x in y])\n # col_types.append('float') # target column type is int\n\n # Add target column and convert all types to pandas dtypes\n features.append(y)\n col_types.append(\n \"int\" if task == \"classification\" else \"float\"\n ) # target column type is int\n pd_col_types = map_col_types(col_types)\n pd_df = pd.DataFrame(features).T # transpose to get samples x features\n num_feats = len(features) - 1\n columns = list(range(0, num_feats)) if num_feats > 0 else []\n columns = columns + [\"target\"]\n pd_df.columns = columns # rename columns\n\n # handle special column from datettime: replace placeholder with pandas.date_range columns\n for i, col in 
enumerate(special_cols_num):\n pd_df[col] = special_pd_df[i]\n pd_col_types[col] = pd_df.dtypes[col]\n\n # assign datatypes to pd dataframe for non-datetime types\n columns_types_all = list(zip(columns, pd_col_types))\n columns_types_nodtime = [\n (name, typ)\n for (name, typ) in columns_types_all\n if typ not in allowed_dtime_types\n ]\n columns_types_dtime = [\n (name, typ) for (name, typ) in columns_types_all if typ in allowed_dtime_types\n ]\n pd_df = pd_df.astype(dict(columns_types_nodtime)) # cast types on non-dtime columns\n\n # assign datatypes to pd dataframe only for datetime types\n for col, col_type in columns_types_dtime:\n if col_type == \"timedelta\":\n pd_df[col] = pd.to_timedelta(pd_df[col], errors=\"coerce\")\n elif col_type == \"datetimez\":\n pd_df[col] = pd_df[col]\n elif col_type == \"datetime\":\n pd_df[col] = pd.to_datetime(pd_df[col], errors=\"coerce\")\n if contain_null:\n pd_df[col] = generate_null(pd_df[col], null_ratio)\n if tz_aware:\n tz[str(col)] = pytz.all_timezones[\n np.random.randint(len(pytz.all_timezones))\n ]\n else:\n pd_df[col] = pd.to_timedelta(pd_df[col], errors=\"coerce\")\n\n # add extra features columns that were provided by content\n pd_df[pd_df.shape[1] + extra_features.columns] = extra_features\n\n # Convert all the column names to string type (mainly for FS min_features [] tests)\n pd_df.columns = [str(col) for col in pd_df.columns]\n\n if tz_aware:\n return pd_df.drop([\"target\"], axis=1), pd_df[\"target\"], tz\n else:\n return pd_df.drop([\"target\"], axis=1), pd_df[\"target\"]" } ]
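The context snippets above document the fairness-metric entry points that this record's test file exercises. As a hedged illustration only, and not part of the dataset record itself, the sketch below shows how two of the documented functions, ``model_statistical_parity`` and ``true_positive_rate``, could be called on a small hand-built frame. The ``'race'``/``'sex'`` columns and the toy label vectors are assumptions made purely for demonstration, and the ``guardian_ai`` package is assumed to be installed.

.. code-block:: python

    import pandas as pd

    from guardian_ai.fairness.metrics import model_statistical_parity, true_positive_rate

    # Toy protected attributes: every (race, sex) combination contains both a
    # positive and a negative ground-truth label, so per-subgroup rates are defined.
    subgroups = pd.DataFrame({
        "race": ["A", "A", "A", "A", "B", "B", "B", "B"],
        "sex":  ["F", "F", "M", "M", "F", "F", "M", "M"],
    })
    y_true = [1, 0, 1, 0, 1, 0, 1, 0]
    y_pred = [1, 0, 1, 1, 0, 0, 1, 0]

    # Statistical parity needs only predictions and subgroups (y_true may be None),
    # so it is called with keyword arguments as shown in the docstring above.
    print(model_statistical_parity(y_pred=y_pred, subgroups=subgroups))

    # Mean pairwise difference of per-subgroup recall, using the documented defaults
    # distance_measure='diff' and reduction='mean'.
    print(true_positive_rate(y_true, y_pred, subgroups))

With the default ``'diff'`` distance and ``'mean'`` reduction both calls return a single float; per the docstrings above, passing ``reduction=None`` would instead return a dictionary keyed by subgroup pair.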
import math import numpy as np import pandas as pd import pytest import sklearn from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import OneHotEncoder from guardian_ai.fairness.metrics.dataset import ( ConsistencyScorer, DatasetStatisticalParityScorer, SmoothedEDFScorer, consistency, dataset_statistical_parity, smoothed_edf, ) from guardian_ai.fairness.metrics.model import ( EqualizedOddsScorer, ErrorRateScorer, FalseDiscoveryRateScorer, FalseNegativeRateScorer, FalseOmissionRateScorer, FalsePositiveRateScorer, ModelStatisticalParityScorer, TheilIndexScorer, TruePositiveRateScorer, equalized_odds, error_rate, false_discovery_rate, false_negative_rate, false_omission_rate, false_positive_rate, model_statistical_parity, theil_index, true_positive_rate, ) from guardian_ai.utils.exception import GuardianAITypeError, GuardianAIValueError from tests.utils import get_dummy_dataset
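The import block above also pulls in a scikit-learn ``Pipeline``, ``OneHotEncoder``, and ``RandomForestClassifier`` alongside the scorer classes. The following is a minimal sketch, assuming ``guardian_ai`` and scikit-learn are installed, of how a scorer such as ``TruePositiveRateScorer`` can wrap a fitted pipeline through the documented ``scorer(model, X, y_true)`` call. The all-categorical toy frame and its column names are invented for illustration; the only requirement implied by the docstrings is that the protected attribute names refer to columns present in ``X``.

.. code-block:: python

    import pandas as pd
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import OneHotEncoder
    from sklearn.ensemble import RandomForestClassifier

    from guardian_ai.fairness.metrics import TruePositiveRateScorer

    # All-categorical toy data so a single OneHotEncoder step is enough.
    X = pd.DataFrame({
        "race": ["A", "A", "A", "A", "B", "B", "B", "B"] * 5,
        "sex":  ["F", "F", "M", "M", "F", "F", "M", "M"] * 5,
        "job":  ["x", "y", "x", "y", "x", "y", "x", "y"] * 5,
    })
    y_true = pd.Series([1, 0, 1, 0, 1, 0, 1, 0] * 5)

    model = Pipeline([
        ("encode", OneHotEncoder(handle_unknown="ignore")),
        ("clf", RandomForestClassifier(n_estimators=10, random_state=0)),
    ])
    model.fit(X, y_true)

    # The protected attribute names are taken to refer to columns of X, which is
    # why the frame above includes 'race' and 'sex'.
    scorer = TruePositiveRateScorer(["race", "sex"])
    print(scorer(model, X, y_true))

Because the scorer compares per-subgroup true positive rates of ``model.predict(X)``, any estimator exposing ``predict`` should be usable in place of the pipeline shown here.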
18,356
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ @pytest.fixture(scope="module", autouse=True) def init(): np.random.seed(12345) def is_close(a, b): return math.isclose(a, b, rel_tol=1e-5) def approx_dict(d): return pytest.approx(d, rel=1e-5) MODEL_X_Y_SCORERS = { "model_statistical_parity_scorer": ModelStatisticalParityScorer, "true_positive_rate_scorer": TruePositiveRateScorer,
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ @pytest.fixture(scope="module", autouse=True) def init(): np.random.seed(12345) def is_close(a, b): return math.isclose(a, b, rel_tol=1e-5) def approx_dict(d): return pytest.approx(d, rel=1e-5) MODEL_X_Y_SCORERS = { "model_statistical_parity_scorer": ModelStatisticalParityScorer, "true_positive_rate_scorer": TruePositiveRateScorer,
"false_positive_rate_scorer": FalsePositiveRateScorer,
11
2023-10-09 09:48:50+00:00
24k
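The record above appears to pair a truncated code context with a single reference line (the ``FalsePositiveRateScorer`` entry shown a few fields earlier) that a completion model is expected to reproduce. Purely as a hypothetical sketch, not an API of this dataset or of any library, one simple way to score a predicted line against such a reference is whitespace-normalized exact match:

.. code-block:: python

    def normalize(line: str) -> str:
        # Collapse internal whitespace and strip the ends so pure formatting
        # differences do not count as mismatches.
        return " ".join(line.strip().split())

    def exact_match(prediction: str, reference: str) -> bool:
        return normalize(prediction) == normalize(reference)

    # Using the reference line shown in the record above.
    reference = '"false_positive_rate_scorer": FalsePositiveRateScorer,'
    print(exact_match('   "false_positive_rate_scorer":  FalsePositiveRateScorer, ', reference))  # True

Looser comparisons such as edit similarity or identifier-level matching are equally plausible; exact match is only the simplest baseline.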
jiangjiechen/auction-arena
app.py
[ { "identifier": "create_items", "path": "src/item_base.py", "snippet": "def create_items(item_info_jsl):\n '''\n item_info: a list of dict (name, price, desc, id)\n '''\n item_info_jsl = LoadJsonL(item_info_jsl)\n item_list = []\n for info in item_info_jsl:\n item_list.append(Item(**info))\n return item_list" }, { "identifier": "Bidder", "path": "src/bidder_base.py", "snippet": "class Bidder(BaseModel):\n name: str\n model_name: str \n budget: int \n desire: str\n plan_strategy: str\n temperature: float = 0.7\n overestimate_percent: int = 10\n correct_belief: bool\n enable_learning: bool = False\n \n llm: BaseLanguageModel = None\n openai_cost = 0\n llm_token_count = 0\n \n verbose: bool = False\n auction_hash: str = ''\n\n system_message: str = ''\n original_budget: int = 0\n\n # working memory\n profit: int = 0\n cur_item_id = 0\n items: list = []\n dialogue_history: list = [] # for gradio UI display\n llm_prompt_history: list = [] # for tracking llm calling\n items_won = []\n bid_history: list = [] # history of the bidding of a single item\n plan_instruct: str = '' # instruction for planning\n cur_plan: str = '' # current plan\n status_quo: dict = {} # belief of budget and profit, self and others\n withdraw: bool = False # state of withdraw\n learnings: str = '' # learnings from previous biddings. If given, then use it to guide the rest of the auction.\n max_bid_cnt: int = 4 # Rule Bidder: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n rule_bid_cnt: int = 0 # Rule Bidder: count of bids on one item\n\n # belief tracking\n failed_bid_cnt: int = 0 # count of failed bids (overspending)\n total_bid_cnt: int = 0 # count of total bids\n self_belief_error_cnt: int = 0\n total_self_belief_cnt: int = 0\n other_belief_error_cnt: int = 0\n total_other_belief_cnt: int = 0\n \n engagement_count: int = 0\n budget_history = []\n profit_history = []\n budget_error_history = []\n profit_error_history = []\n win_bid_error_history = []\n engagement_history = defaultdict(int)\n all_bidders_status = {} # track others' profit\n changes_of_plan = []\n \n # not used\n input_box: str = None\n need_input = False\n semaphore = 0\n\n class Config:\n arbitrary_types_allowed = True\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.name\n \n @classmethod\n def create(cls, **data):\n instance = cls(**data)\n instance._post_init()\n return instance\n\n def _post_init(self):\n self.original_budget = self.budget\n self.system_message = SYSTEM_MESSAGE.format(\n name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n )\n self._parse_llm()\n self.dialogue_history += [\n SystemMessage(content=self.system_message), \n AIMessage(content='')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n\n def _parse_llm(self):\n if 'gpt-' in self.model_name:\n self.llm = ChatOpenAI(model=self.model_name, temperature=self.temperature, max_retries=30, request_timeout=1200)\n elif 'claude' in self.model_name:\n self.llm = ChatAnthropic(model=self.model_name, temperature=self.temperature, default_request_timeout=1200)\n elif 'bison' in self.model_name:\n self.llm = ChatGooglePalm(model_name=f'models/{self.model_name}', temperature=self.temperature)\n elif 'rule' in self.model_name or 'human' in self.model_name:\n self.llm = None\n else:\n raise NotImplementedError(self.model_name)\n \n # def _rotate_openai_org(self):\n # # use two organizations to avoid rate limit\n # if os.environ.get('OPENAI_ORGANIZATION_1') and 
os.environ.get('OPENAI_ORGANIZATION_2'):\n # return random.choice([os.environ.get('OPENAI_ORGANIZATION_1'), os.environ.get('OPENAI_ORGANIZATION_2')])\n # else:\n # return None\n \n def _run_llm_standalone(self, messages: list):\n \n with get_openai_callback() as cb:\n for i in range(6):\n try:\n input_token_num = self.llm.get_num_tokens_from_messages(messages)\n if 'claude' in self.model_name: # anthropic's claude\n result = self.llm(messages, max_tokens_to_sample=2048)\n elif 'bison' in self.model_name: # google's palm-2\n max_tokens = min(max(3900 - input_token_num, 192), 2048)\n if isinstance(self.llm, ChatVertexAI):\n result = self.llm(messages, max_output_tokens=max_tokens)\n else:\n result = self.llm(messages)\n elif 'gpt' in self.model_name: # openai\n if 'gpt-3.5-turbo' in self.model_name and '16k' not in self.model_name:\n max_tokens = max(3900 - input_token_num, 192)\n else:\n # gpt-4\n # self.llm.openai_organization = self._rotate_openai_org()\n max_tokens = max(8000 - input_token_num, 192)\n result = self.llm(messages, max_tokens=max_tokens)\n elif 'llama' in self.model_name.lower():\n raise NotImplementedError\n else:\n raise NotImplementedError\n break\n except:\n print(f'Retrying for {self.model_name} ({i+1}/6), wait for {2**(i+1)} sec...')\n time.sleep(2**(i+1))\n self.openai_cost += cb.total_cost\n self.llm_token_count = self.llm.get_num_tokens_from_messages(messages)\n return result.content\n\n def _get_estimated_value(self, item):\n value = item.true_value * (1 + self.overestimate_percent / 100)\n return int(value)\n \n def _get_cur_item(self, key=None):\n if self.cur_item_id < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id].__dict__[key]\n else:\n return self.items[self.cur_item_id]\n else:\n return 'no item left'\n \n def _get_next_item(self, key=None):\n if self.cur_item_id + 1 < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id + 1].__dict__[key]\n else:\n return self.items[self.cur_item_id + 1]\n else:\n return 'no item left'\n \n def _get_remaining_items(self, as_str=False):\n remain_items = self.items[self.cur_item_id + 1:]\n if as_str:\n return ', '.join([item.name for item in remain_items])\n else:\n return remain_items\n \n def _get_items_value_str(self, items: List[Item]):\n if not isinstance(items, list):\n items = [items]\n items_info = ''\n for i, item in enumerate(items):\n estimated_value = self._get_estimated_value(item)\n _info = f\"{i+1}. {item}, starting price is ${item.price}. Your estimated value for this item is ${estimated_value}.\\n\"\n items_info += _info\n return items_info.strip()\n \n # ********** Main Instructions and Functions ********** #\n \n def learn_from_prev_auction(self, past_learnings, past_auction_log):\n if not self.enable_learning or 'rule' in self.model_name or 'human' in self.model_name:\n return ''\n \n instruct_learn = INSTRUCT_LEARNING_TEMPLATE.format(\n past_auction_log=past_auction_log,\n past_learnings=past_learnings)\n\n result = self._run_llm_standalone([HumanMessage(content=instruct_learn)])\n self.dialogue_history += [\n HumanMessage(content=instruct_learn),\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in [HumanMessage(content=instruct_learn)]],\n 'result': result,\n 'tag': 'learn_0'\n })\n \n self.learnings = '\\n'.join(extract_numbered_list(result))\n if self.learnings != '':\n self.system_message += f\"\\n\\nHere are your key learning points and practical tips from a previous auction. 
You can use them to guide this auction:\\n```\\n{self.learnings}\\n```\"\n \n if self.verbose:\n print(f\"Learn from previous auction: {self.name} ({self.model_name}).\")\n return result\n\n def _choose_items(self, budget, items: List[Item]):\n '''\n Choose items within budget for rule bidders.\n Cheap ones first if maximize_items, expensive ones first if maximize_profit.\n '''\n sorted_items = sorted(items, key=lambda x: self._get_estimated_value(x), \n reverse=self.desire == 'maximize_profit')\n \n chosen_items = []\n i = 0\n while budget >= 0 and i < len(sorted_items):\n item = sorted_items[i]\n if item.price <= budget:\n chosen_items.append(item)\n budget -= item.price\n i += 1\n \n return chosen_items\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = INSTRUCT_PLAN_TEMPLATE.format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items), \n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n '''\n Plan for bidding with auctioneer's instruction and items information for customize estimated value.\n plan = plan(system_message, instruct_plan)\n '''\n if 'rule' in self.model_name: \n # self.cur_plan = ', '.join([x.name for x in self._choose_items(self.budget, self.items)])\n # self.dialogue_history += [\n # HumanMessage(content=plan_instruct),\n # AIMessage(content=self.cur_plan),\n # ]\n # return self.cur_plan\n return ''\n\n self.status_quo = {\n 'remaining_budget': self.budget,\n 'total_profits': {bidder: 0 for bidder in self.all_bidders_status.keys()},\n 'winning_bids': {bidder: {} for bidder in self.all_bidders_status.keys()},\n }\n\n if self.plan_strategy == 'none':\n self.plan_instruct = ''\n self.cur_plan = ''\n return None\n\n system_msg = SystemMessage(content=self.system_message)\n plan_msg = HumanMessage(content=plan_instruct)\n messages = [system_msg, plan_msg]\n result = self._run_llm_standalone(messages)\n \n if self.verbose:\n print(get_colored_text(plan_msg.content, 'red'))\n print(get_colored_text(result, 'green'))\n \n self.dialogue_history += [\n plan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': 'plan_0'\n })\n self.cur_plan = result\n self.plan_instruct = plan_instruct\n \n self.changes_of_plan.append([\n f\"{self.cur_item_id} (Initial)\", \n False, \n json.dumps(extract_jsons_from_text(result)[-1]),\n ])\n \n if self.verbose:\n print(f\"Plan: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n return result\n \n def get_rebid_instruct(self, auctioneer_msg: str):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg),\n AIMessage(content='')\n ]\n return auctioneer_msg\n\n def get_bid_instruct(self, auctioneer_msg: str, bid_round: int):\n auctioneer_msg = auctioneer_msg.replace(self.name, f'You ({self.name})')\n \n bid_instruct = INSTRUCT_BID_TEMPLATE.format(\n auctioneer_msg=auctioneer_msg, \n bidder_name=self.name,\n cur_item=self._get_cur_item(),\n estimated_value=self._get_estimated_value(self._get_cur_item()),\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n if bid_round == 0:\n if self.plan_strategy in ['static', 'none']:\n # if static planner, then no replanning is needed. status quo is updated in replanning. 
thus need to add status quo in bid instruct.\n bid_instruct = f\"\"\"The status quo of this auction so far is:\\n\"{json.dumps(self.status_quo, indent=4)}\"\\n\\n{bid_instruct}\\n---\\n\"\"\"\n else:\n bid_instruct = f'Now, the auctioneer says: \"{auctioneer_msg}\"'\n \n self.dialogue_history += [\n HumanMessage(content=bid_instruct),\n AIMessage(content='')\n ]\n return bid_instruct\n \n def bid_rule(self, cur_bid: int, min_markup_pct: float = 0.1):\n '''\n :param cur_bid: current highest bid\n :param min_markup_pct: minimum percentage for bid increase\n :param max_bid_cnt: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n '''\n # dialogue history already got bid_instruction.\n cur_item = self._get_cur_item()\n \n if cur_bid <= 0:\n next_bid = cur_item.price\n else:\n next_bid = cur_bid + min_markup_pct * cur_item.price\n \n if self.budget - next_bid >= 0 and self.rule_bid_cnt < self.max_bid_cnt:\n msg = int(next_bid)\n self.rule_bid_cnt += 1\n else:\n msg = -1\n \n content = f'The current highest bid for {cur_item.name} is ${cur_bid}. '\n content += \"I'm out!\" if msg < 0 else f\"I bid ${msg}! (Rule generated)\"\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=content)\n ]\n \n return msg\n \n def bid(self, bid_instruct):\n '''\n Bid for an item with auctioneer's instruction and bidding history.\n bid_history = bid(system_message, instruct_plan, plan, bid_history)\n '''\n if self.model_name == 'rule':\n return ''\n \n bid_msg = HumanMessage(content=bid_instruct)\n \n if self.plan_strategy == 'none':\n messages = [SystemMessage(content=self.system_message)]\n else:\n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n \n self.bid_history += [bid_msg]\n messages += self.bid_history\n \n result = self._run_llm_standalone(messages)\n \n self.bid_history += [AIMessage(content=result)]\n\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=result)\n ]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'bid_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(bid_instruct, 'yellow'))\n print(get_colored_text(result, 'green'))\n \n print(f\"Bid: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n self.total_bid_cnt += 1\n \n return result\n\n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct = INSTRUCT_SUMMARIZE_TEMPLATE.format(\n cur_item=self._get_cur_item(), \n bidding_history=bidding_history, \n hammer_msg=hammer_msg.strip(), \n win_lose_msg=win_lose_msg.strip(), \n bidder_name=self.name,\n prev_status=self._status_json_to_text(self.status_quo),\n )\n return instruct\n\n def summarize(self, instruct_summarize: str):\n '''\n Update belief/status quo\n status_quo = summarize(system_message, bid_history, prev_status + instruct_summarize)\n '''\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n \n if self.model_name == 'rule': \n self.rule_bid_cnt = 0 # reset bid count for rule bidder\n return ''\n \n messages = [SystemMessage(content=self.system_message)]\n # messages += self.bid_history\n summ_msg = HumanMessage(content=instruct_summarize)\n messages.append(summ_msg)\n\n status_quo_text = self._run_llm_standalone(messages)\n \n self.dialogue_history += [summ_msg, AIMessage(content=status_quo_text)]\n self.bid_history += [summ_msg, 
AIMessage(content=status_quo_text)]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': status_quo_text,\n 'tag': f'summarize_{self.cur_item_id}'\n })\n\n cnt = 0\n while cnt <= 3:\n sanity_msg = self._sanity_check_status_json(extract_jsons_from_text(status_quo_text)[-1])\n if sanity_msg == '':\n # pass sanity check then track beliefs\n consistency_msg = self._belief_tracking(status_quo_text)\n else:\n sanity_msg = f'- {sanity_msg}'\n consistency_msg = ''\n \n if sanity_msg != '' or (consistency_msg != '' and self.correct_belief):\n err_msg = f\"As {self.name}, here are some error(s) of your summary of the status JSON:\\n{sanity_msg.strip()}\\n{consistency_msg.strip()}\\n\\nPlease revise the status JSON based on the errors. Don't apologize. Just give me the revised status JSON.\".strip()\n \n # print(f\"{self.name}: revising status quo for the {cnt} time:\")\n # print(get_colored_text(err_msg, 'green'))\n # print(get_colored_text(status_quo_text, 'red'))\n \n messages += [AIMessage(content=status_quo_text), \n HumanMessage(content=err_msg)]\n status_quo_text = self._run_llm_standalone(messages)\n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=status_quo_text),\n ]\n cnt += 1\n else:\n break\n \n self.status_quo = extract_jsons_from_text(status_quo_text)[-1]\n\n if self.verbose:\n print(get_colored_text(instruct_summarize, 'blue'))\n print(get_colored_text(status_quo_text, 'green'))\n \n print(f\"Summarize: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n \n return status_quo_text\n \n def get_replan_instruct(self):\n instruct = INSTRUCT_REPLAN_TEMPLATE.format(\n status_quo=self._status_json_to_text(self.status_quo),\n remaining_items_info=self._get_items_value_str(self._get_remaining_items()),\n bidder_name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return instruct\n\n def replan(self, instruct_replan: str):\n '''\n plan = replan(system_message, instruct_plan, prev_plan, status_quo + (learning) + instruct_replan)\n '''\n if self.model_name == 'rule': \n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n if self.plan_strategy in ['none', 'static']:\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n self.withdraw = False\n return 'Skip replanning for bidders with static or no plan.'\n \n replan_msg = HumanMessage(content=instruct_replan)\n \n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n messages.append(replan_msg)\n\n result = self._run_llm_standalone(messages)\n \n new_plan_dict = extract_jsons_from_text(result)[-1]\n cnt = 0\n while len(new_plan_dict) == 0 and cnt < 2:\n err_msg = 'Your response does not contain a JSON-format priority list for items. 
Please revise your plan.'\n messages += [\n AIMessage(content=result),\n HumanMessage(content=err_msg),\n ]\n result = self._run_llm_standalone(messages)\n new_plan_dict = extract_jsons_from_text(result)[-1]\n \n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=result),\n ]\n cnt += 1\n \n old_plan_dict = extract_jsons_from_text(self.cur_plan)[-1]\n self.changes_of_plan.append([\n f\"{self.cur_item_id + 1} ({self._get_cur_item('name')})\", \n self._change_of_plan(old_plan_dict, new_plan_dict),\n json.dumps(new_plan_dict)\n ])\n \n self.plan_instruct = instruct_replan\n self.cur_plan = result\n self.withdraw = False\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n\n self.dialogue_history += [\n replan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'plan_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(instruct_replan, 'blue'))\n print(get_colored_text(result, 'green'))\n\n print(f\"Replan: {self.name} ({self.model_name}).\")\n return result\n \n def _change_of_plan(self, old_plan: dict, new_plan: dict):\n for k in new_plan:\n if new_plan[k] != old_plan.get(k, None):\n return True\n return False\n \n # *********** Belief Tracking and Sanity Check *********** #\n \n def bid_sanity_check(self, bid_price, prev_round_max_bid, min_markup_pct):\n # can't bid more than budget or less than previous highest bid\n if bid_price < 0:\n msg = None\n else:\n min_bid_increase = int(min_markup_pct * self._get_cur_item('price'))\n if bid_price > self.budget:\n msg = f\"you don't have insufficient budget (${self.budget} left)\"\n elif bid_price < self._get_cur_item('price'):\n msg = f\"your bid is lower than the starting bid (${self._get_cur_item('price')})\"\n elif bid_price < prev_round_max_bid + min_bid_increase:\n msg = f\"you must advance previous highest bid (${prev_round_max_bid}) by at least ${min_bid_increase} ({int(100 * min_markup_pct)}%).\"\n else:\n msg = None\n return msg\n\n def rebid_for_failure(self, fail_instruct: str):\n result = self.bid(fail_instruct)\n self.failed_bid_cnt += 1\n return result\n \n def _sanity_check_status_json(self, data: dict):\n if data == {}:\n return \"Error: No parsible JSON in your response. 
Possibly due to missing a closing curly bracket '}', or unpasible values (e.g., 'profit': 1000 + 400, instead of 'profit': 1400).\"\n\n # Check if all expected top-level keys are present\n expected_keys = [\"remaining_budget\", \"total_profits\", \"winning_bids\"]\n for key in expected_keys:\n if key not in data:\n return f\"Error: Missing '{key}' field in the status JSON.\"\n\n # Check if \"remaining_budget\" is a number\n if not isinstance(data[\"remaining_budget\"], (int, float)):\n return \"Error: 'remaining_budget' should be a number, and only about your remaining budget.\"\n\n # Check if \"total_profits\" is a dictionary with numbers as values\n if not isinstance(data[\"total_profits\"], dict):\n return \"Error: 'total_profits' should be a dictionary of every bidder.\"\n for bidder, profit in data[\"total_profits\"].items():\n if not isinstance(profit, (int, float)):\n return f\"Error: Profit for {bidder} should be a number.\"\n\n # Check if \"winning_bids\" is a dictionary and that each bidder's entry is a dictionary with numbers\n if not isinstance(data[\"winning_bids\"], dict):\n return \"Error: 'winning_bids' should be a dictionary.\"\n for bidder, bids in data[\"winning_bids\"].items():\n if not isinstance(bids, dict):\n return f\"Error: Bids for {bidder} should be a dictionary.\"\n for item, amount in bids.items():\n if not isinstance(amount, (int, float)):\n return f\"Error: Amount for {item} under {bidder} should be a number.\"\n\n # If everything is fine\n return \"\"\n \n def _status_json_to_text(self, data: dict):\n if 'rule' in self.model_name: return ''\n \n # Extract and format remaining budget\n structured_text = f\"* Remaining Budget: ${data.get('remaining_budget', 'unknown')}\\n\\n\"\n \n # Extract and format total profits for each bidder\n structured_text += \"* Total Profits:\\n\"\n if data.get('total_profits'):\n for bidder, profit in data['total_profits'].items():\n structured_text += f\" * {bidder}: ${profit}\\n\"\n \n # Extract and list the winning bids for each item by each bidder\n structured_text += \"\\n* Winning Bids:\\n\"\n if data.get('winning_bids'):\n for bidder, bids in data['winning_bids'].items():\n structured_text += f\" * {bidder}:\\n\"\n if bids:\n for item, amount in bids.items():\n structured_text += f\" * {item}: ${amount}\\n\"\n else:\n structured_text += f\" * No winning bids\\n\"\n \n return structured_text.strip()\n\n def _belief_tracking(self, status_text: str):\n '''\n Parse status quo and check if the belief is correct.\n '''\n belief_json = extract_jsons_from_text(status_text)[-1]\n # {\"remaining_budget\": 8000, \"total_profits\": {\"Bidder 1\": 1300, \"Bidder 2\": 1800, \"Bidder 3\": 0}, \"winning_bids\": {\"Bidder 1\": {\"Item 2\": 1200, \"Item 3\": 1000}, \"Bidder 2\": {\"Item 1\": 2000}, \"Bidder 3\": {}}}\n budget_belief = belief_json['remaining_budget']\n profits_belief = belief_json['total_profits']\n winning_bids = belief_json['winning_bids']\n\n msg = ''\n # track belief of budget\n self.total_self_belief_cnt += 1\n if budget_belief != self.budget:\n msg += f'- Your belief of budget is wrong: you have ${self.budget} left, but you think you have ${budget_belief} left.\\n'\n self.self_belief_error_cnt += 1\n self.budget_error_history.append([\n self._get_cur_item('name'),\n budget_belief,\n self.budget,\n ])\n \n # track belief of profits\n for bidder_name, profit in profits_belief.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n \n if self.name in 
bidder_name: \n bidder_name = self.name\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n real_profit = self.all_bidders_status[bidder_name]['profit']\n \n if profit != real_profit:\n if self.name == bidder_name:\n self.self_belief_error_cnt += 1\n else:\n self.other_belief_error_cnt += 1\n\n msg += f'- Your belief of total profit of {bidder_name} is wrong: {bidder_name} has earned ${real_profit} so far, but you think {bidder_name} has earned ${profit}.\\n'\n\n # add to history\n self.profit_error_history.append([\n f\"{bidder_name} ({self._get_cur_item('name')})\",\n profit,\n real_profit\n ])\n\n # track belief of winning bids\n for bidder_name, items_won_dict in winning_bids.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n\n real_items_won = self.all_bidders_status[bidder_name]['items_won']\n # items_won = [(item, bid_price), ...)]\n \n items_won_list = list(items_won_dict.keys())\n real_items_won_list = [str(x) for x, _ in real_items_won]\n \n if self.name in bidder_name:\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n if not item_list_equal(items_won_list, real_items_won_list):\n if bidder_name == self.name:\n self.self_belief_error_cnt += 1\n _bidder_name = f'you'\n else:\n self.other_belief_error_cnt += 1\n _bidder_name = bidder_name\n \n msg += f\"- Your belief of winning items of {bidder_name} is wrong: {bidder_name} won {real_items_won}, but you think {bidder_name} won {items_won_dict}.\\n\"\n\n self.win_bid_error_history.append([\n f\"{_bidder_name} ({self._get_cur_item('name')})\",\n ', '.join(items_won_list),\n ', '.join(real_items_won_list)\n ])\n \n return msg\n \n def win_bid(self, item: Item, bid: int):\n self.budget -= bid\n self.profit += item.true_value - bid\n self.items_won += [[item, bid]]\n msg = f\"Congratuations! You won {item} at ${bid}.\"# Now you have ${self.budget} left. Your total profit so far is ${self.profit}.\"\n return msg\n \n def lose_bid(self, item: Item):\n return f\"You lost {item}.\"# Now, you have ${self.budget} left. 
Your total profit so far is ${self.profit}.\"\n \n # set the profit information of other bidders\n def set_all_bidders_status(self, all_bidders_status: dict):\n self.all_bidders_status = all_bidders_status.copy()\n\n def set_withdraw(self, bid: int):\n if bid < 0: # withdraw\n self.withdraw = True\n elif bid == 0: # enable discount and bid again\n self.withdraw = False\n else: # normal bid\n self.withdraw = False\n self.engagement_count += 1\n self.engagement_history[self._get_cur_item('name')] += 1\n \n # ****************** Logging ****************** #\n \n # def _parse_hedging(self, plan: str): # deprecated\n # prompt = PARSE_HEDGE_INSTRUCTION.format(\n # item_name=self._get_cur_item(), \n # plan=plan)\n \n # with get_openai_callback() as cb:\n # llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n # result = llm([HumanMessage(content=prompt)]).content\n # self.openai_cost += cb.total_cost\n # # parse a number, which could be a digit\n # hedge_percent = re.findall(r'\\d+\\.?\\d*%', result)\n # if len(hedge_percent) > 0:\n # hedge_percent = hedge_percent[0].replace('%', '')\n # else:\n # hedge_percent = 0\n # return float(hedge_percent)\n \n def profit_report(self):\n '''\n Personal profit report at the end of an auction.\n '''\n msg = f\"* {self.name}, starting with ${self.original_budget}, has won {len(self.items_won)} items in this auction, with a total profit of ${self.profit}.:\\n\"\n profit = 0\n for item, bid in self.items_won:\n profit += item.true_value - bid\n msg += f\" * Won {item} at ${bid} over ${item.price}, with a true value of ${item.true_value}.\\n\"\n return msg.strip()\n \n def to_monitors(self, as_json=False):\n # budget, profit, items_won, tokens\n if len(self.items_won) == 0 and not as_json: \n items_won = [['', 0, 0]]\n else:\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n \n profit_error_history = self.profit_error_history if self.profit_error_history != [] or as_json else [['', '', '']]\n win_bid_error_history = self.win_bid_error_history if self.win_bid_error_history != [] or as_json else [['', '', '']]\n budget_error_history = self.budget_error_history if self.budget_error_history != [] or as_json else [['', '']]\n changes_of_plan = self.changes_of_plan if self.changes_of_plan != [] or as_json else [['', '', '']]\n \n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'model_name': self.model_name,\n 'desire': self.desire,\n 'plan_strategy': self.plan_strategy,\n 'overestimate_percent': self.overestimate_percent,\n 'temperature': self.temperature,\n 'correct_belief': self.correct_belief,\n 'enable_learning': self.enable_learning,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'tokens_used': self.llm_token_count,\n 'openai_cost': round(self.openai_cost, 2),\n 'failed_bid_cnt': self.failed_bid_cnt,\n 'self_belief_error_cnt': self.self_belief_error_cnt,\n 'other_belief_error_cnt': self.other_belief_error_cnt,\n 'failed_bid_rate': round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2),\n 'self_error_rate': round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2),\n 'other_error_rate': round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2),\n 'engagement_count': self.engagement_count,\n 'engagement_history': self.engagement_history,\n 'changes_of_plan': changes_of_plan,\n 'budget_error_history': budget_error_history,\n 'profit_error_history': 
profit_error_history,\n 'win_bid_error_history': win_bid_error_history,\n 'history': self.llm_prompt_history\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n self.llm_token_count, \n round(self.openai_cost, 2), \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2), \n round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2), \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n changes_of_plan,\n budget_error_history,\n profit_error_history, \n win_bid_error_history\n ]\n\n def dialogue_to_chatbot(self):\n # chatbot: [[Human, AI], [], ...]\n # only dialogue will be sent to LLMs. chatbot is just for display.\n assert len(self.dialogue_history) % 2 == 0\n chatbot = []\n for i in range(0, len(self.dialogue_history), 2):\n # if exceeds the length of dialogue, append the last message\n human_msg = self.dialogue_history[i].content\n ai_msg = self.dialogue_history[i+1].content\n if ai_msg == '': ai_msg = None\n if human_msg == '': human_msg = None\n chatbot.append([human_msg, ai_msg])\n return chatbot" }, { "identifier": "HumanBidder", "path": "src/human_bidder.py", "snippet": "class HumanBidder(Bidder):\n name: str\n human_name: str = \"Adam\"\n budget: int\n auction_hash: str\n \n cur_item_id = 0\n items: list = []\n withdraw: bool = False\n \n engagement_count: int = 0\n original_budget: int = 0\n profit: int = 0\n items_won = []\n \n all_bidders_status = {} # track others' profit\n \n # essential for demo\n need_input: bool = False\n semaphore: int = 0 # if needs input, then semaphore is set as 1, else waits.\n input_box: str = None # global variable for accepting user input\n \n # not used\n model_name: str = 'human'\n openai_cost = 0\n desire = ''\n plan_strategy = ''\n correct_belief = True\n \n class Config:\n arbitrary_types_allowed = True\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = \"As {bidder_name}, you have a total budget of ${budget}. 
This auction has a total of {item_num} items to be sequentially presented, they are:\\n{items_info}\".format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items)\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n # Human = auctioneer, AI = bidder\n self.dialogue_history += [\n HumanMessage(content=plan_instruct),\n AIMessage(content='(Getting ready...)')\n ]\n return ''\n \n def get_bid_instruct(self, auctioneer_msg, bid_round):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg), \n AIMessage(content='')\n ]\n return auctioneer_msg\n \n def bid(self, bid_instruct):\n # wait for the cue to handle user input\n while self.semaphore <= 0:\n time.sleep(1)\n \n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=self.input_box)\n ]\n self.semaphore -= 1\n self.need_input = False\n return self.input_box\n \n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct_summarize = f\"{bidding_history}\\n\\n{hammer_msg}\\n{win_lose_msg}\"\n return instruct_summarize\n \n def summarize(self, instruct_summarize: str):\n self.dialogue_history += [\n HumanMessage(content=instruct_summarize),\n AIMessage(content='(Taking notes...)')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n return ''\n \n def get_replan_instruct(self):\n return ''\n\n def replan(self, instruct_replan):\n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n def to_monitors(self, as_json=False):\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'human_name': self.human_name,\n 'model_name': self.model_name,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'engagement_count': self.engagement_count,\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n 0, \n 0, \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n 0, \n 0, \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n [],\n [],\n [], \n []\n ]" }, { "identifier": "Auctioneer", "path": "src/auctioneer_base.py", "snippet": "class Auctioneer(BaseModel):\n enable_discount: bool = False\n items: List[Item] = []\n cur_item: Item = None\n highest_bidder: Bidder = None\n highest_bid: int = -1\n bidding_history = defaultdict(list) # history about the bidding war of one item\n items_queue: List[Item] = [] # updates when a item is taken.\n auction_logs = defaultdict(list) # history about the bidding war of all items\n openai_cost = 0\n prev_round_max_bid: int = -1\n min_bid: int = 0\n fail_to_sell = False\n min_markup_pct = 0.1\n\n class Config:\n arbitrary_types_allowed = True\n \n def init_items(self, items: List[Item]):\n for item in items:\n # reset discounted price\n item.reset_price()\n self.items = items\n self.items_queue = items.copy()\n\n def summarize_items_info(self):\n desc = ''\n for item in self.items:\n desc += f\"- {item.get_desc()}\\n\"\n return desc.strip()\n \n def present_item(self):\n cur_item = self.items_queue.pop(0)\n self.cur_item = cur_item\n return cur_item\n \n def shuffle_items(self):\n random.shuffle(self.items)\n self.items_queue = self.items.copy()\n \n def record_bid(self, bid_info: dict, bid_round: int):\n '''\n Save the 
bidding history for each round, log the highest bidder and highest bidding\n '''\n # bid_info: {'bidder': xxx, 'bid': xxx, 'raw_msg': xxx}\n self.bidding_history[bid_round].append(bid_info)\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n if self.highest_bid < hist['bid']:\n self.highest_bid = hist['bid']\n self.highest_bidder = hist['bidder']\n elif self.highest_bid == hist['bid']:\n # random if there's a tie\n self.highest_bidder = random.choice([self.highest_bidder, hist['bidder']])\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append(\n {'bidder': bid_info['bidder'], \n 'bid': bid_info['bid'], \n 'bid_round': bid_round})\n\n def _biddings_to_string(self, bid_round: int):\n '''\n Return a string that summarizes the bidding history in a round\n '''\n # bid_hist_text = '' if bid_round == 0 else f'- {self.highest_bidder}: ${self.highest_bid}\\n'\n bid_hist_text = ''\n for js in self.bidding_history[bid_round]:\n if js['bid'] < 0:\n bid_hist_text += f\"- {js['bidder']} withdrew\\n\"\n else:\n bid_hist_text += f\"- {js['bidder']}: ${js['bid']}\\n\"\n return bid_hist_text.strip()\n \n def all_bidding_history_to_string(self):\n bid_hist_text = ''\n for bid_round in self.bidding_history:\n bid_hist_text += f\"Round {bid_round}:\\n{self._biddings_to_string(bid_round)}\\n\\n\"\n return bid_hist_text.strip()\n\n def ask_for_bid(self, bid_round: int):\n '''\n Ask for bid, return the message to be sent to bidders\n '''\n if self.highest_bidder is None:\n if bid_round > 0:\n msg = f\"Seeing as we've had no takers at the initial price, we're going to lower the starting bid to ${self.cur_item.price} for {self.cur_item.name} to spark some interest! Do I have any takers?\"\n else:\n remaining_items = [self.cur_item.name] + [item.name for item in self.items_queue]\n msg = f\"Attention, bidders! {len(remaining_items)} item(s) left, they are: {', '.join(remaining_items)}.\\n\\nNow, please bid on {self.cur_item}. The starting price for bidding for {self.cur_item} is ${self.cur_item.price}. Anyone interested in this item?\"\n else:\n bidding_history = self._biddings_to_string(bid_round - 1)\n msg = f\"Thank you! This is the {p.ordinal(bid_round)} round of bidding for this item:\\n{bidding_history}\\n\\nNow we have ${self.highest_bid} from {self.highest_bidder.name} for {self.cur_item.name}. The minimum increase over this highest bid is ${int(self.cur_item.price * self.min_markup_pct)}. Do I have any advance on ${self.highest_bid}?\"\n return msg\n \n def ask_for_rebid(self, fail_msg: str, bid_price: int):\n return f\"Your bid of ${bid_price} failed, because {fail_msg}: You must reconsider your bid.\"\n\n def get_hammer_msg(self):\n if self.highest_bidder is None:\n return f\"Since no one bid on {self.cur_item.name}, we'll move on to the next item.\"\n else:\n return f\"Sold! {self.cur_item} to {self.highest_bidder} at ${self.highest_bid}! 
The true value for {self.cur_item} is ${self.cur_item.true_value}.\"# Thus {self.highest_bidder}'s profit by winning this item is ${self.cur_item.true_value - self.highest_bid}.\"\n\n def check_hammer(self, bid_round: int):\n # check if the item is sold\n self.fail_to_sell = False\n num_bid = self._num_bids_in_round(bid_round)\n\n # highest_bidder has already been updated in record_bid().\n # so when num_bid == 0 & highest_bidder is None, it means no one bid on this item\n if self.highest_bidder is None:\n if num_bid == 0:\n # failed to sell, as there is no highest bidder\n self.fail_to_sell = True\n if self.enable_discount and bid_round < 3:\n # lower the starting price by 50%. discoutn only applies to the first 3 rounds\n self.cur_item.lower_price(0.5)\n is_sold = False\n else:\n is_sold = True\n else:\n # won't happen\n raise ValueError(f\"highest_bidder is None but num_bid is {num_bid}\")\n else:\n if self.prev_round_max_bid < 0 and num_bid == 1:\n # only one bidder in the first round \n is_sold = True\n else:\n self.prev_round_max_bid = self.highest_bid\n is_sold = self._num_bids_in_round(bid_round) == 0\n return is_sold\n \n def _num_bids_in_round(self, bid_round: int):\n # check if there is no bid in the current round\n cnt = 0\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n cnt += 1\n return cnt\n\n def hammer_fall(self):\n print(f'* Sold! {self.cur_item} (${self.cur_item.true_value}) goes to {self.highest_bidder} at ${self.highest_bid}.')\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append({\n 'bidder': self.highest_bidder, \n 'bid': f\"{self.highest_bid} (${self.cur_item.true_value})\", # no need for the first $, as it will be added in the self.log()\n 'bid_round': 'Hammer price (true value)'})\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n\n def end_auction(self):\n return len(self.items_queue) == 0\n \n def gather_all_status(self, bidders: List[Bidder]):\n status = {}\n for bidder in bidders:\n status[bidder.name] = {\n 'profit': bidder.profit, \n 'items_won': bidder.items_won\n }\n return status\n\n def parse_bid(self, text: str):\n prompt = PARSE_BID_INSTRUCTION.format(response=text)\n with get_openai_callback() as cb:\n llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n result = llm([HumanMessage(content=prompt)]).content\n self.openai_cost += cb.total_cost\n \n bid_number = re.findall(r'\\$?\\d+', result.replace(',', ''))\n # find number in the result\n if '-1' in result:\n return -1\n elif len(bid_number) > 0:\n return int(bid_number[-1].replace('$', ''))\n else:\n print('* Rebid:', text)\n return None\n\n def log(self, bidder_personal_reports: list = [], show_model_name=True):\n ''' example\n Apparatus H, starting at $1000.\n\n 1st bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): $1200\n Bidder 2 (gpt-3.5-turbo-16k-0613): $1100\n Bidder 3 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n \n 2nd bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 2 (gpt-3.5-turbo-16k-0613): Withdrawn\n \n Hammer price:\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n '''\n markdown_output = \"## Auction Log\\n\\n\"\n for i, (item, bids) in enumerate(self.auction_logs.items()):\n markdown_output += f\"### {i+1}. 
{item}\\n\\n\"\n cur_bid_round = -1\n for i, bid in enumerate(bids):\n if bid['bid_round'] != cur_bid_round:\n cur_bid_round = bid['bid_round']\n if isinstance(bid['bid_round'], int):\n markdown_output += f\"\\n#### {p.ordinal(bid['bid_round']+1)} bid:\\n\\n\"\n else:\n markdown_output += f\"\\n#### {bid['bid_round']}:\\n\\n\"\n bid_price = f\"${bid['bid']}\" if bid['bid'] != -1 else 'Withdrew'\n if isinstance(bid['bidder'], Bidder) or isinstance(bid['bidder'], HumanBidder):\n if show_model_name:\n markdown_output += f\"* {bid['bidder']} ({bid['bidder'].model_name}): {bid_price}\\n\"\n else:\n markdown_output += f\"* {bid['bidder']}: {bid_price}\\n\"\n else:\n markdown_output += f\"* None bid\\n\"\n markdown_output += \"\\n\"\n \n if len(bidder_personal_reports) != 0:\n markdown_output += f\"\\n## Personal Report\"\n for report in bidder_personal_reports:\n markdown_output += f\"\\n\\n{report}\"\n return markdown_output.strip()\n \n def finish_auction(self):\n self.auction_logs = defaultdict(list)\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.items_queue = []\n self.items = []\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n self.min_bid = 0" }, { "identifier": "run_auction", "path": "auction_workflow.py", "snippet": "def run_auction(\n auction_hash: str, \n auctioneer: Auctioneer, \n bidder_list: List[Bidder], \n thread_num: int, \n yield_for_demo=True,\n log_dir=LOG_DIR,\n repeat_num=0,\n memo_file=None):\n \n # bidder_list[0].verbose=True\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n # ***************** Learn Round ****************\n for bidder in bidder_list:\n if bidder.enable_learning and memo_file:\n # if no prev memo file, then no need to learn.\n if os.path.exists(memo_file):\n with open(memo_file) as f:\n data = json.load(f)\n past_learnings = data['learnings'][bidder.name]\n past_auction_log = data['auction_log']\n bidder.learn_from_prev_auction(past_learnings, past_auction_log)\n \n # ***************** Plan Round *****************\n # init bidder profit\n bidder_profit_info = auctioneer.gather_all_status(bidder_list)\n for bidder in bidder_list:\n bidder.set_all_bidders_status(bidder_profit_info)\n\n plan_instructs = [bidder.get_plan_instruct(auctioneer.items) for bidder in bidder_list]\n\n bidding_multithread(bidder_list, plan_instructs, func_type='plan', thread_num=thread_num)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n bar = tqdm(total=len(auctioneer.items_queue), desc='Auction Progress')\n while not auctioneer.end_auction():\n cur_item = auctioneer.present_item()\n \n bid_round = 0\n while True:\n # ***************** Bid Round ***************** \n auctioneer_msg = auctioneer.ask_for_bid(bid_round)\n _bidder_list = []\n _bid_instruct_list = []\n # remove highest bidder and withdrawn bidders\n for bidder in bidder_list:\n if bidder is auctioneer.highest_bidder or bidder.withdraw:\n bidder.need_input = False\n continue\n else:\n bidder.need_input = True # enable input from demo\n instruct = bidder.get_bid_instruct(auctioneer_msg, bid_round)\n _bidder_list.append(bidder)\n _bid_instruct_list.append(instruct)\n \n if yield_for_demo:\n 
chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + enable_human_box(bidder_list)\n \n _msgs = bidding_multithread(_bidder_list, _bid_instruct_list, func_type='bid', thread_num=thread_num)\n\n for i, (msg, bidder) in enumerate(zip(_msgs, _bidder_list)):\n if bidder.model_name == 'rule':\n bid_price = bidder.bid_rule(auctioneer.prev_round_max_bid, auctioneer.min_markup_pct)\n else:\n bid_price = parse_bid_price(auctioneer, bidder, msg)\n\n # can't bid more than budget or less than previous highest bid\n while True:\n fail_msg = bidder.bid_sanity_check(bid_price, auctioneer.prev_round_max_bid, auctioneer.min_markup_pct)\n if fail_msg is None: \n break\n else:\n bidder.need_input = True # enable input from demo\n auctioneer_msg = auctioneer.ask_for_rebid(fail_msg=fail_msg, bid_price=bid_price)\n rebid_instruct = bidder.get_rebid_instruct(auctioneer_msg)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n msg = bidder.rebid_for_failure(rebid_instruct)\n bid_price = parse_bid_price(auctioneer, bidder, msg)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n bidder.set_withdraw(bid_price)\n auctioneer.record_bid({'bidder': bidder, 'bid': bid_price, 'raw_msg': msg}, bid_round)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n is_sold = auctioneer.check_hammer(bid_round)\n bid_round += 1\n if is_sold: \n break\n else:\n if auctioneer.fail_to_sell and auctioneer.enable_discount:\n for bidder in bidder_list:\n bidder.set_withdraw(0) # back in the game\n\n # ***************** Summarize ***************** \n summarize_instruct_list = []\n for bidder in bidder_list:\n if bidder is auctioneer.highest_bidder:\n win_lose_msg = bidder.win_bid(cur_item, auctioneer.highest_bid)\n else:\n win_lose_msg = bidder.lose_bid(cur_item)\n msg = bidder.get_summarize_instruct(\n bidding_history=auctioneer.all_bidding_history_to_string(),\n hammer_msg=auctioneer.get_hammer_msg(),\n win_lose_msg=win_lose_msg\n )\n summarize_instruct_list.append(msg)\n\n # record profit information of all bidders for each bidder\n # (not used in the auction, just for belief tracking evaluation)\n bidder_profit_info = auctioneer.gather_all_status(bidder_list)\n for bidder in bidder_list:\n bidder.set_all_bidders_status(bidder_profit_info)\n \n bidding_multithread(bidder_list, summarize_instruct_list, func_type='summarize', thread_num=thread_num)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n\n # ***************** Replan *****************\n if len(auctioneer.items_queue) > 0: # no need to replan if all items are sold\n replan_instruct_list = [bidder.get_replan_instruct(\n # bidding_history=auctioneer.all_bidding_history_to_string(), \n # hammer_msg=auctioneer.get_hammer_msg()\n ) for bidder in bidder_list]\n bidding_multithread(bidder_list, replan_instruct_list, 
func_type='replan', thread_num=thread_num)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n\n auctioneer.hammer_fall()\n bar.update(1)\n\n total_cost = sum([b.openai_cost for b in bidder_list]) + auctioneer.openai_cost\n bidder_reports = [bidder.profit_report() for bidder in bidder_list]\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list, profit_report=True)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log(bidder_reports) + f'\\n## Total Cost: ${total_cost}'] + [disable_gr, enable_gr] + disable_all_box(bidder_list)\n \n memo = {'auction_log': auctioneer.log(show_model_name=False),\n 'memo_text': bidder_reports,\n 'profit': {bidder.name: bidder.profit for bidder in bidder_list},\n 'total_cost': total_cost,\n 'learnings': {bidder.name: bidder.learnings for bidder in bidder_list},\n 'model_info': {bidder.name: bidder.model_name for bidder in bidder_list}}\n log_bidders(log_dir, auction_hash, bidder_list, repeat_num, memo)\n \n auctioneer.finish_auction()\n \n if not yield_for_demo:\n yield total_cost" }, { "identifier": "make_auction_hash", "path": "auction_workflow.py", "snippet": "def make_auction_hash():\n return str(int(time.time()))" }, { "identifier": "chunks", "path": "utils.py", "snippet": "def chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]" }, { "identifier": "reset_state_list", "path": "utils.py", "snippet": "def reset_state_list(*states):\n empty = [None for _ in states[1:]]\n return [[]] + empty" } ]
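For orientation only (this is not part of the dataset record): a minimal, hypothetical driver sketch showing how the pieces defined in the snippets above (Auctioneer, run_auction, make_auction_hash) could be wired together. The concrete argument values and the prepared `items` / `bidder_list` variables are illustrative assumptions, not taken from the repository.

    # Hypothetical driver sketch; the argument values and the pre-built
    # `items` / `bidder_list` lists are assumptions for illustration.
    from src.auctioneer_base import Auctioneer
    from auction_workflow import run_auction, make_auction_hash

    auction_hash = make_auction_hash()                 # timestamp-based id for this run
    auctioneer = Auctioneer(enable_discount=False, min_markup_pct=0.1)
    auctioneer.init_items(items)                       # `items`: prepared list of Item objects
    # With yield_for_demo=False the generator yields exactly once, at the very end,
    # with the total OpenAI cost of the run.
    total_cost = next(run_auction(auction_hash, auctioneer, bidder_list,
                                  thread_num=4, yield_for_demo=False))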
import os
import gradio as gr
from app_modules.presets import *
from app_modules.overwrites import *
from app_modules.utils import *
from src.item_base import create_items
from src.bidder_base import Bidder
from src.human_bidder import HumanBidder
from src.auctioneer_base import Auctioneer
from auction_workflow import run_auction, make_auction_hash
from utils import chunks, reset_state_list
15,751
BIDDER_NUM = 4

items = create_items('data/items_demo.jsonl')

def auction_loop_app(*args):
    global items
    bidder_list = args[0]  # gr.State() -> session state
    items_id = args[1]
    os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '')
    os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '')
    thread_num = args[4]
    item_shuffle = args[5]
    enable_discount = args[6]
    min_markup_pct = args[7]
    args = args[8:]

    auction_hash = make_auction_hash()

    items_to_bid = [items[i] for i in items_id]
    auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct)
    auctioneer.init_items(items_to_bid)
    if item_shuffle:
        auctioneer.shuffle_items()

    # must correspond to the order in app's parameters
    input_keys = [
        'chatbot',
        'model_name',
        'desire',
        'plan_strategy',
        'budget',
        'correct_belief',
        'enable_learning',
        'temperature',
        'overestimate_percent',
    ]
    # convert flatten list into a json list
    input_jsl = []
    for i, chunk in enumerate(chunks(args, len(input_keys))):
        js = {'name': f"Bidder {i+1}", 'auction_hash': auction_hash}
        for k, v in zip(input_keys, chunk):
            js[k] = v
        input_jsl.append(js)

    for js in input_jsl:
        js.pop('chatbot')
        if 'human' in js['model_name']:
            bidder_list.append(HumanBidder.create(**js))
        else:
BIDDER_NUM = 4

items = create_items('data/items_demo.jsonl')

def auction_loop_app(*args):
    global items
    bidder_list = args[0]  # gr.State() -> session state
    items_id = args[1]
    os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '')
    os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '')
    thread_num = args[4]
    item_shuffle = args[5]
    enable_discount = args[6]
    min_markup_pct = args[7]
    args = args[8:]

    auction_hash = make_auction_hash()

    items_to_bid = [items[i] for i in items_id]
    auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct)
    auctioneer.init_items(items_to_bid)
    if item_shuffle:
        auctioneer.shuffle_items()

    # must correspond to the order in app's parameters
    input_keys = [
        'chatbot',
        'model_name',
        'desire',
        'plan_strategy',
        'budget',
        'correct_belief',
        'enable_learning',
        'temperature',
        'overestimate_percent',
    ]
    # convert flatten list into a json list
    input_jsl = []
    for i, chunk in enumerate(chunks(args, len(input_keys))):
        js = {'name': f"Bidder {i+1}", 'auction_hash': auction_hash}
        for k, v in zip(input_keys, chunk):
            js[k] = v
        input_jsl.append(js)

    for js in input_jsl:
        js.pop('chatbot')
        if 'human' in js['model_name']:
            bidder_list.append(HumanBidder.create(**js))
        else:
bidder_list.append(Bidder.create(**js))
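The single line above reads as the continuation of the `else:` branch at which the preceding code block breaks off; read together (indentation inferred, purely for illustration), that branch becomes:

        if 'human' in js['model_name']:
            bidder_list.append(HumanBidder.create(**js))
        else:
            bidder_list.append(Bidder.create(**js))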
1
2023-10-08 09:30:57+00:00
24k
sakemin/cog-musicgen-chord
predict.py
[ { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an encoder-decoder and a quantizer)\n to perform high fidelity audio reconstruction.\n \"\"\"\n def __init__(self, cfg: omegaconf.DictConfig):\n super().__init__(cfg)\n self.rng: torch.Generator # set at each epoch\n self.adv_losses = builders.get_adversarial_losses(self.cfg)\n self.aux_losses = nn.ModuleDict()\n self.info_losses = nn.ModuleDict()\n assert not cfg.fsdp.use, \"FSDP not supported by CompressionSolver.\"\n loss_weights = dict()\n for loss_name, weight in self.cfg.losses.items():\n if loss_name in ['adv', 'feat']:\n for adv_name, _ in self.adv_losses.items():\n loss_weights[f'{loss_name}_{adv_name}'] = weight\n elif weight > 0:\n self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n loss_weights[loss_name] = weight\n else:\n self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)\n self.register_stateful('adv_losses')\n\n @property\n def best_metric_name(self) -> tp.Optional[str]:\n # best model is the last for the compression model\n return None\n\n def build_model(self):\n \"\"\"Instantiate model and optimizer.\"\"\"\n # Model and optimizer\n self.model = models.builders.get_compression_model(self.cfg).to(self.device)\n self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)\n self.register_stateful('model', 'optimizer')\n self.register_best_state('model')\n self.register_ema('model')\n\n def build_dataloaders(self):\n \"\"\"Instantiate audio dataloaders for each stage.\"\"\"\n self.dataloaders = builders.get_audio_datasets(self.cfg)\n\n def show(self):\n \"\"\"Show the compression model and employed adversarial loss.\"\"\"\n self.logger.info(f\"Compression model with {self.model.quantizer.total_codebooks} codebooks:\")\n self.log_model_summary(self.model)\n self.logger.info(\"Adversarial loss:\")\n self.log_model_summary(self.adv_losses)\n self.logger.info(\"Auxiliary losses:\")\n self.logger.info(self.aux_losses)\n self.logger.info(\"Info losses:\")\n self.logger.info(self.info_losses)\n\n def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):\n \"\"\"Perform one training or valid step on a given batch.\"\"\"\n x = batch.to(self.device)\n y = x.clone()\n\n qres = self.model(x)\n assert isinstance(qres, quantization.QuantizedResult)\n y_pred = qres.x\n # Log bandwidth in kb/s\n metrics['bandwidth'] = qres.bandwidth.mean()\n\n if self.is_training:\n d_losses: dict = {}\n if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:\n for adv_name, adversary in self.adv_losses.items():\n disc_loss = adversary.train_adv(y_pred, y)\n d_losses[f'd_{adv_name}'] = disc_loss\n metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))\n metrics.update(d_losses)\n\n balanced_losses: dict = {}\n other_losses: dict = {}\n\n # penalty from quantization\n if qres.penalty is not None and qres.penalty.requires_grad:\n other_losses['penalty'] = qres.penalty # penalty term from the quantizer\n\n # adversarial losses\n for adv_name, adversary in self.adv_losses.items():\n adv_loss, feat_loss = adversary(y_pred, y)\n balanced_losses[f'adv_{adv_name}'] = adv_loss\n balanced_losses[f'feat_{adv_name}'] = feat_loss\n\n 
# auxiliary losses\n for loss_name, criterion in self.aux_losses.items():\n loss = criterion(y_pred, y)\n balanced_losses[loss_name] = loss\n\n # weighted losses\n metrics.update(balanced_losses)\n metrics.update(other_losses)\n metrics.update(qres.metrics)\n\n if self.is_training:\n # backprop losses that are not handled by balancer\n other_loss = torch.tensor(0., device=self.device)\n if 'penalty' in other_losses:\n other_loss += other_losses['penalty']\n if other_loss.requires_grad:\n other_loss.backward(retain_graph=True)\n ratio1 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio1, torch.Tensor)\n metrics['ratio1'] = ratio1.sqrt()\n\n # balancer losses backward, returns effective training loss\n # with effective weights at the current batch.\n metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)\n # add metrics corresponding to weight ratios\n metrics.update(self.balancer.metrics)\n ratio2 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio2, torch.Tensor)\n metrics['ratio2'] = ratio2.sqrt()\n\n # optim\n flashy.distrib.sync_model(self.model)\n if self.cfg.optim.max_norm:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.cfg.optim.max_norm\n )\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # informative losses only\n info_losses: dict = {}\n with torch.no_grad():\n for loss_name, criterion in self.info_losses.items():\n loss = criterion(y_pred, y)\n info_losses[loss_name] = loss\n\n metrics.update(info_losses)\n\n # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups\n adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]\n if len(adv_losses) > 0:\n metrics['adv'] = torch.sum(torch.stack(adv_losses))\n feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]\n if len(feat_losses) > 0:\n metrics['feat'] = torch.sum(torch.stack(feat_losses))\n\n return metrics\n\n def run_epoch(self):\n # reset random seed at the beginning of the epoch\n self.rng = torch.Generator()\n self.rng.manual_seed(1234 + self.epoch)\n # run epoch\n super().run_epoch()\n\n def evaluate(self):\n \"\"\"Evaluate stage. 
Runs audio reconstruction evaluation.\"\"\"\n self.model.eval()\n evaluate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['evaluate']\n updates = len(loader)\n lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)\n average = flashy.averager()\n\n pendings = []\n ctx = multiprocessing.get_context('spawn')\n with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:\n for idx, batch in enumerate(lp):\n x = batch.to(self.device)\n with torch.no_grad():\n qres = self.model(x)\n\n y_pred = qres.x.cpu()\n y = batch.cpu() # should already be on CPU but just in case\n pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))\n\n metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)\n for pending in metrics_lp:\n metrics = pending.result()\n metrics = average(metrics)\n\n metrics = flashy.distrib.average_metrics(metrics, len(loader))\n return metrics\n\n def generate(self):\n \"\"\"Generate stage.\"\"\"\n self.model.eval()\n sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)\n generate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['generate']\n updates = len(loader)\n lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)\n\n for batch in lp:\n reference, _ = batch\n reference = reference.to(self.device)\n with torch.no_grad():\n qres = self.model(reference)\n assert isinstance(qres, quantization.QuantizedResult)\n\n reference = reference.cpu()\n estimate = qres.x.cpu()\n sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)\n\n flashy.distrib.barrier()\n\n def load_from_pretrained(self, name: str) -> dict:\n model = models.CompressionModel.get_pretrained(name)\n if isinstance(model, models.DAC):\n raise RuntimeError(\"Cannot fine tune a DAC model.\")\n elif isinstance(model, models.HFEncodecCompressionModel):\n self.logger.warning('Trying to automatically convert a HuggingFace model '\n 'to AudioCraft, this might fail!')\n state = model.model.state_dict()\n new_state = {}\n for k, v in state.items():\n if k.startswith('decoder.layers') and '.conv.' in k and '.block.' 
not in k:\n # We need to determine if this a convtr or a regular conv.\n layer = int(k.split('.')[2])\n if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):\n\n k = k.replace('.conv.', '.convtr.')\n k = k.replace('encoder.layers.', 'encoder.model.')\n k = k.replace('decoder.layers.', 'decoder.model.')\n k = k.replace('conv.', 'conv.conv.')\n k = k.replace('convtr.', 'convtr.convtr.')\n k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')\n k = k.replace('.codebook.', '._codebook.')\n new_state[k] = v\n state = new_state\n elif isinstance(model, models.EncodecModel):\n state = model.state_dict()\n else:\n raise RuntimeError(f\"Cannot fine tune model type {type(model)}.\")\n return {\n 'best_state': {'model': state}\n }\n\n @staticmethod\n def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a CompressionModel from a given checkpoint path or dora sig.\n This method is a convenient endpoint to load a CompressionModel to use in other solvers.\n\n Args:\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n This also supports pre-trained models by using a path of the form //pretrained/NAME.\n See `model_from_pretrained` for a list of supported pretrained models.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n checkpoint_path = str(checkpoint_path)\n if checkpoint_path.startswith('//pretrained/'):\n name = checkpoint_path.split('/', 3)[-1]\n return models.CompressionModel.get_pretrained(name, device)\n logger = logging.getLogger(__name__)\n logger.info(f\"Loading compression model from checkpoint: {checkpoint_path}\")\n _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)\n assert _checkpoint_path is not None, f\"Could not resolve compression model checkpoint path: {checkpoint_path}\"\n state = checkpoint.load_checkpoint(_checkpoint_path)\n assert state is not None and 'xp.cfg' in state, f\"Could not load compression model from ckpt: {checkpoint_path}\"\n cfg = state['xp.cfg']\n cfg.device = device\n compression_model = models.builders.get_compression_model(cfg).to(device)\n assert compression_model.sample_rate == cfg.sample_rate, \"Compression model sample rate should match\"\n\n assert 'best_state' in state and state['best_state'] != {}\n assert 'exported' not in state, \"When loading an exported checkpoint, use the //pretrained/ prefix.\"\n compression_model.load_state_dict(state['best_state']['model'])\n compression_model.eval()\n logger.info(\"Compression model loaded!\")\n return compression_model\n\n @staticmethod\n def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,\n checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.\n\n Args:\n cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)\n compression_model = 
models.builders.get_wrapped_compression_model(compression_model, cfg)\n return compression_model" }, { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.\n \"\"\"\n def __init__(self, DPs: tp.List[DiffusionProcess], codec_model: CompressionModel) -> None:\n self.DPs = DPs\n self.codec_model = codec_model\n self.device = next(self.codec_model.parameters()).device\n\n @property\n def sample_rate(self) -> int:\n return self.codec_model.sample_rate\n\n @staticmethod\n def get_mbd_musicgen(device=None):\n \"\"\"Load our diffusion models trained for MusicGen.\"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n path = 'facebook/multiband-diffusion'\n filename = 'mbd_musicgen_32khz.th'\n name = 'facebook/musicgen-small'\n codec_model = load_compression_model(name, device=device)\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n @staticmethod\n def get_mbd_24khz(bw: float = 3.0, pretrained: bool = True,\n device: tp.Optional[tp.Union[torch.device, str]] = None,\n n_q: tp.Optional[int] = None):\n \"\"\"Get the pretrained Models for MultibandDiffusion.\n\n Args:\n bw (float): Bandwidth of the compression model.\n pretrained (bool): Whether to use / download if necessary the models.\n device (torch.device or str, optional): Device on which the models are loaded.\n n_q (int, optional): Number of quantizers to use within the compression model.\n \"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n assert bw in [1.5, 3.0, 6.0], f\"bandwidth {bw} not available\"\n if n_q is not None:\n assert n_q in [2, 4, 8]\n assert {1.5: 2, 3.0: 4, 6.0: 8}[bw] == n_q, \\\n f\"bandwidth and number of codebooks missmatch to use n_q = {n_q} bw should be {n_q * (1.5 / 2)}\"\n n_q = {1.5: 2, 3.0: 4, 6.0: 8}[bw]\n codec_model = CompressionSolver.model_from_checkpoint(\n '//pretrained/facebook/encodec_24khz', device=device)\n codec_model.set_num_codebooks(n_q)\n codec_model = codec_model.to(device)\n path = 'facebook/multiband-diffusion'\n filename = f'mbd_comp_{n_q}.pt'\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n return MultiBandDiffusion(DPs, codec_model)\n\n @torch.no_grad()\n def get_condition(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:\n \"\"\"Get the conditioning (i.e. 
latent reprentatios of the compression model) from a waveform.\n Args:\n wav (torch.Tensor): The audio that we want to extract the conditioning from\n sample_rate (int): sample rate of the audio\"\"\"\n if sample_rate != self.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.sample_rate)\n codes, scale = self.codec_model.encode(wav)\n assert scale is None, \"Scaled compression models not supported.\"\n emb = self.get_emb(codes)\n return emb\n\n @torch.no_grad()\n def get_emb(self, codes: torch.Tensor):\n \"\"\"Get latent representation from the discrete codes\n Argrs:\n codes (torch.Tensor): discrete tokens\"\"\"\n emb = self.codec_model.decode_latent(codes)\n return emb\n\n def generate(self, emb: torch.Tensor, size: tp.Optional[torch.Size] = None,\n step_list: tp.Optional[tp.List[int]] = None):\n \"\"\"Generate Wavform audio from the latent embeddings of the compression model\n Args:\n emb (torch.Tensor): Conditioning embeddinds\n size (none torch.Size): size of the output\n if None this is computed from the typical upsampling of the model\n step_list (optional list[int]): list of Markov chain steps, defaults to 50 linearly spaced step.\n \"\"\"\n if size is None:\n upsampling = int(self.codec_model.sample_rate / self.codec_model.frame_rate)\n size = torch.Size([emb.size(0), self.codec_model.channels, emb.size(-1) * upsampling])\n assert size[0] == emb.size(0)\n out = torch.zeros(size).to(self.device)\n for DP in self.DPs:\n out += DP.generate(condition=emb, step_list=step_list, initial_noise=torch.randn_like(out))\n return out\n\n def re_eq(self, wav: torch.Tensor, ref: torch.Tensor, n_bands: int = 32, strictness: float = 1):\n \"\"\"match the eq to the encodec output by matching the standard deviation of some frequency bands\n Args:\n wav (torch.Tensor): audio to equalize\n ref (torch.Tensor):refenrence audio from which we match the spectrogram.\n n_bands (int): number of bands of the eq\n strictness (float): how strict the the matching. 
0 is no matching, 1 is exact matching.\n \"\"\"\n split = julius.SplitBands(n_bands=n_bands, sample_rate=self.codec_model.sample_rate).to(wav.device)\n bands = split(wav)\n bands_ref = split(ref)\n out = torch.zeros_like(ref)\n for i in range(n_bands):\n out += bands[i] * (bands_ref[i].std() / bands[i].std()) ** strictness\n return out\n\n def regenerate(self, wav: torch.Tensor, sample_rate: int):\n \"\"\"Regenerate a wavform through compression and diffusion regeneration.\n Args:\n wav (torch.Tensor): Original 'ground truth' audio\n sample_rate (int): sample rate of the input (and output) wav\n \"\"\"\n if sample_rate != self.codec_model.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.codec_model.sample_rate)\n emb = self.get_condition(wav, sample_rate=self.codec_model.sample_rate)\n size = wav.size()\n out = self.generate(emb, size=size)\n if sample_rate != self.codec_model.sample_rate:\n out = julius.resample_frac(out, self.codec_model.sample_rate, sample_rate)\n return out\n\n def tokens_to_wav(self, tokens: torch.Tensor, n_bands: int = 32):\n \"\"\"Generate Waveform audio with diffusion from the discrete codes.\n Args:\n tokens (torch.Tensor): discrete codes\n n_bands (int): bands for the eq matching.\n \"\"\"\n wav_encodec = self.codec_model.decode(tokens)\n condition = self.get_emb(tokens)\n wav_diffusion = self.generate(emb=condition, size=wav_encodec.size())\n return self.re_eq(wav=wav_diffusion, ref=wav_encodec, n_bands=n_bands)" }, { "identifier": "MusicGen", "path": "audiocraft/models/musicgen.py", "snippet": "class MusicGen:\n \"\"\"MusicGen main model with convenient generation API.\n\n Args:\n name (str): name of the model.\n compression_model (CompressionModel): Compression model\n used to map audio to invertible discrete representations.\n lm (LMModel): Language model over discrete representations.\n max_duration (float, optional): maximum duration the model can produce,\n otherwise, inferred from the training params.\n \"\"\"\n def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,\n max_duration: tp.Optional[float] = None):\n self.name = name\n self.compression_model = compression_model\n self.lm = lm\n self.cfg: tp.Optional[omegaconf.DictConfig] = None\n # Just to be safe, let's put everything in eval mode.\n self.compression_model.eval()\n self.lm.eval()\n\n if hasattr(lm, 'cfg'):\n cfg = lm.cfg\n assert isinstance(cfg, omegaconf.DictConfig)\n self.cfg = cfg\n\n if self.cfg is not None:\n self.compression_model = get_wrapped_compression_model(self.compression_model, self.cfg)\n\n if max_duration is None:\n if self.cfg is not None:\n max_duration = lm.cfg.dataset.segment_duration # type: ignore\n else:\n raise ValueError(\"You must provide max_duration when building directly MusicGen\")\n assert max_duration is not None\n self.max_duration: float = max_duration\n self.device = next(iter(lm.parameters())).device\n\n self.generation_params: dict = {}\n self.set_generation_params(duration=15) # 15 seconds by default\n self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None\n if self.device.type == 'cpu':\n self.autocast = TorchAutocast(enabled=False)\n else:\n self.autocast = TorchAutocast(\n enabled=True, device_type=self.device.type, dtype=torch.float16)\n\n @property\n def frame_rate(self) -> float:\n \"\"\"Roughly the number of AR steps per seconds.\"\"\"\n return self.compression_model.frame_rate\n\n @property\n def sample_rate(self) -> int:\n \"\"\"Sample rate of the generated audio.\"\"\"\n return 
self.compression_model.sample_rate\n\n @property\n def audio_channels(self) -> int:\n \"\"\"Audio channels of the generated audio.\"\"\"\n return self.compression_model.channels\n\n @staticmethod\n def get_pretrained(name: str = 'facebook/musicgen-melody', device=None):\n \"\"\"Return pretrained model, we provide four models:\n - facebook/musicgen-small (300M), text to music,\n # see: https://huggingface.co/facebook/musicgen-small\n - facebook/musicgen-medium (1.5B), text to music,\n # see: https://huggingface.co/facebook/musicgen-medium\n - facebook/musicgen-melody (1.5B) text to music and text+melody to music,\n # see: https://huggingface.co/facebook/musicgen-melody\n - facebook/musicgen-large (3.3B), text to music,\n # see: https://huggingface.co/facebook/musicgen-large\n \"\"\"\n if device is None:\n if torch.cuda.device_count():\n device = 'cuda'\n else:\n device = 'cpu'\n\n if name == 'debug':\n # used only for unit tests\n compression_model = get_debug_compression_model(device)\n lm = get_debug_lm_model(device)\n return MusicGen(name, compression_model, lm, max_duration=30)\n\n if name in _HF_MODEL_CHECKPOINTS_MAP:\n warnings.warn(\n \"MusicGen pretrained model relying on deprecated checkpoint mapping. \" +\n f\"Please use full pre-trained id instead: facebook/musicgen-{name}\")\n name = _HF_MODEL_CHECKPOINTS_MAP[name]\n\n lm = load_lm_model(name, device=device)\n compression_model = load_compression_model(name, device=device)\n if 'self_wav' in lm.condition_provider.conditioners:\n lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True\n lm.condition_provider.conditioners['self_wav']._use_masking = False\n\n return MusicGen(name, compression_model, lm)\n\n def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,\n top_p: float = 0.0, temperature: float = 1.0,\n duration: float = 30.0, cfg_coef: float = 3.0,\n two_step_cfg: bool = False, extend_stride: float = 18):\n \"\"\"Set the generation parameters for MusicGen.\n\n Args:\n use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.\n top_k (int, optional): top_k used for sampling. Defaults to 250.\n top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.\n temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.\n duration (float, optional): Duration of the generated waveform. Defaults to 30.0.\n cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.\n two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,\n instead of batching together the two. This has some impact on how things\n are padded but seems to have little impact in practice.\n extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much\n should we extend the audio each time. 
Larger values will mean less context is\n preserved, and shorter value will require extra computations.\n \"\"\"\n assert extend_stride < self.max_duration, \"Cannot stride by more than max generation duration.\"\n self.extend_stride = extend_stride\n self.duration = duration\n self.generation_params = {\n 'use_sampling': use_sampling,\n 'temp': temperature,\n 'top_k': top_k,\n 'top_p': top_p,\n 'cfg_coef': cfg_coef,\n 'two_step_cfg': two_step_cfg,\n }\n\n def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):\n \"\"\"Override the default progress callback.\"\"\"\n self._progress_callback = progress_callback\n\n def generate_unconditional(self, num_samples: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples in an unconditional manner.\n\n Args:\n num_samples (int): Number of samples to be generated.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n descriptions: tp.List[tp.Optional[str]] = [None] * num_samples\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType,\n melody_sample_rate: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=melody_wavs)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,\n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_continuation_with_audio_token(self, prompt, \n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt, melody_wavs=melody_wavs)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_audio_chroma(self, prompt, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, melody_wavs=melody_wavs)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_text_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_text_chroma(self, prompt, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. 
It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n \n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_with_text_chroma(self, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n @torch.no_grad()\n def _prepare_tokens_and_attributes(\n self,\n descriptions: tp.Sequence[tp.Optional[str]],\n prompt: tp.Optional[torch.Tensor],\n melody_wavs: tp.Optional[tp.Union[MelodyList,tp.List[str]]] = None, bpm: tp.Optional[tp.Union[float,int,tp.List[float],tp.List[int]]] = None, meter:tp.Optional[tp.Union[int,tp.List[int]]] = None\n ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:\n \"\"\"Prepare model inputs.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n melody_wavs (torch.Tensor, optional): A batch of waveforms\n used as melody conditioning. Defaults to None.\n \"\"\"\n attributes = [\n ConditioningAttributes(text={'description': description})\n for description in descriptions]\n\n if melody_wavs is None:\n for attr in attributes:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n else:\n if 'self_wav' not in self.lm.condition_provider.conditioners:\n raise RuntimeError(\"This model doesn't support melody conditioning. \"\n \"Use the `melody` model.\")\n assert len(melody_wavs) == len(descriptions), \\\n f\"number of melody wavs must match number of descriptions! 
\" \\\n f\"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}\"\n\n if bpm is not None and (isinstance(bpm, int) or isinstance(bpm, float)):\n bpm = [bpm for i in range(len(melody_wavs))]\n elif bpm is not None and isinstance(bpm, tp.List):\n assert len(melody_wavs) == len(bpm)\n\n if meter is not None and (isinstance(meter, int) or isinstance(meter, float)):\n meter = [meter for i in range(len(melody_wavs))]\n elif meter is not None and isinstance(meter, tp.List):\n assert len(melody_wavs) == len(meter)\n\n for attr, melody, i in zip(attributes, melody_wavs, range(len(melody_wavs))):\n if melody is None:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n elif isinstance(melody, torch.Tensor):\n attr.wav['self_wav'] = WavCondition(\n melody[None].to(device=self.device),\n torch.tensor([melody.shape[-1]], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n )\n else :\n attr.wav['self_wav'] = WavChordTextCondition(\n [melody],\n torch.tensor([self.duration*self.sample_rate], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n bpm = [bpm[i]],\n meter = [meter[i]]\n )\n\n if prompt is not None:\n if descriptions is not None:\n assert len(descriptions) == len(prompt), \"Prompt and nb. descriptions doesn't match\"\n prompt = prompt.to(self.device)\n prompt_tokens, scale = self.compression_model.encode(prompt)\n assert scale is None\n else:\n prompt_tokens = None\n return attributes, prompt_tokens\n\n def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],\n prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:\n \"\"\"Generate discrete audio tokens given audio prompt and/or conditions.\n\n Args:\n attributes (list of ConditioningAttributes): Conditions used for generation (text/melody).\n prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n Returns:\n torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.\n \"\"\"\n total_gen_len = int(self.duration * self.frame_rate)\n max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)\n current_gen_offset: int = 0\n\n def _progress_callback(generated_tokens: int, tokens_to_generate: int):\n generated_tokens += current_gen_offset\n if self._progress_callback is not None:\n # Note that total_gen_len might be quite wrong depending on the\n # codebook pattern used, but with delay it is almost accurate.\n self._progress_callback(generated_tokens, total_gen_len)\n else:\n print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\\r')\n\n if prompt_tokens is not None:\n assert max_prompt_len >= prompt_tokens.shape[-1], \\\n \"Prompt is longer than audio to generate\"\n\n callback = None\n if progress:\n callback = _progress_callback\n\n if self.duration <= self.max_duration:\n # generate by sampling from LM, simple case.\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=total_gen_len, **self.generation_params)\n\n else:\n # now this gets a bit messier, we need to handle prompts,\n # melody conditioning etc.\n ref_wavs = [attr.wav['self_wav'] for attr in attributes]\n all_tokens = []\n if prompt_tokens is None:\n prompt_length = 0\n else:\n all_tokens.append(prompt_tokens)\n prompt_length = prompt_tokens.shape[-1]\n\n stride_tokens = int(self.frame_rate * self.extend_stride)\n step = 0\n\n while current_gen_offset + prompt_length < total_gen_len:\n self.lm.condition_provider.conditioners['self_wav'].set_continuation_count(self.extend_stride/self.max_duration, step) #For text based chord conditioning\n time_offset = current_gen_offset / self.frame_rate\n chunk_duration = min(self.duration - time_offset, self.max_duration)\n max_gen_len = int(chunk_duration * self.frame_rate)\n for attr, ref_wav in zip(attributes, ref_wavs):\n if isinstance(ref_wav, WavCondition):\n wav_length = ref_wav.length.item()\n if wav_length == 0:\n continue\n # We will extend the wav periodically if it not long enough.\n # we have to do it here rather than in conditioners.py as otherwise\n # we wouldn't have the full wav.\n initial_position = int(time_offset * self.sample_rate)\n wav_target_length = int(self.max_duration * self.sample_rate)\n positions = torch.arange(initial_position,\n initial_position + wav_target_length, device=self.device)\n attr.wav['self_wav'] = WavCondition(\n ref_wav[0][..., positions % wav_length],\n torch.full_like(ref_wav[1], wav_target_length),\n [self.sample_rate] * ref_wav[0].size(0),\n [None], [0.])\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=max_gen_len, **self.generation_params)\n if prompt_tokens is None:\n all_tokens.append(gen_tokens)\n else:\n all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])\n prompt_tokens = gen_tokens[:, :, stride_tokens:]\n prompt_length = prompt_tokens.shape[-1]\n current_gen_offset += stride_tokens\n step = step + 1\n\n gen_tokens = torch.cat(all_tokens, dim=-1)\n return gen_tokens\n\n def generate_audio(self, gen_tokens: torch.Tensor):\n \"\"\"Generate Audio from tokens\"\"\"\n assert gen_tokens.dim() == 3\n with torch.no_grad():\n gen_audio = self.compression_model.decode(gen_tokens, None)\n return gen_audio" }, { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class 
CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an encoder-decoder and a quantizer)\n to perform high fidelity audio reconstruction.\n \"\"\"\n def __init__(self, cfg: omegaconf.DictConfig):\n super().__init__(cfg)\n self.rng: torch.Generator # set at each epoch\n self.adv_losses = builders.get_adversarial_losses(self.cfg)\n self.aux_losses = nn.ModuleDict()\n self.info_losses = nn.ModuleDict()\n assert not cfg.fsdp.use, \"FSDP not supported by CompressionSolver.\"\n loss_weights = dict()\n for loss_name, weight in self.cfg.losses.items():\n if loss_name in ['adv', 'feat']:\n for adv_name, _ in self.adv_losses.items():\n loss_weights[f'{loss_name}_{adv_name}'] = weight\n elif weight > 0:\n self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n loss_weights[loss_name] = weight\n else:\n self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)\n self.register_stateful('adv_losses')\n\n @property\n def best_metric_name(self) -> tp.Optional[str]:\n # best model is the last for the compression model\n return None\n\n def build_model(self):\n \"\"\"Instantiate model and optimizer.\"\"\"\n # Model and optimizer\n self.model = models.builders.get_compression_model(self.cfg).to(self.device)\n self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)\n self.register_stateful('model', 'optimizer')\n self.register_best_state('model')\n self.register_ema('model')\n\n def build_dataloaders(self):\n \"\"\"Instantiate audio dataloaders for each stage.\"\"\"\n self.dataloaders = builders.get_audio_datasets(self.cfg)\n\n def show(self):\n \"\"\"Show the compression model and employed adversarial loss.\"\"\"\n self.logger.info(f\"Compression model with {self.model.quantizer.total_codebooks} codebooks:\")\n self.log_model_summary(self.model)\n self.logger.info(\"Adversarial loss:\")\n self.log_model_summary(self.adv_losses)\n self.logger.info(\"Auxiliary losses:\")\n self.logger.info(self.aux_losses)\n self.logger.info(\"Info losses:\")\n self.logger.info(self.info_losses)\n\n def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):\n \"\"\"Perform one training or valid step on a given batch.\"\"\"\n x = batch.to(self.device)\n y = x.clone()\n\n qres = self.model(x)\n assert isinstance(qres, quantization.QuantizedResult)\n y_pred = qres.x\n # Log bandwidth in kb/s\n metrics['bandwidth'] = qres.bandwidth.mean()\n\n if self.is_training:\n d_losses: dict = {}\n if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:\n for adv_name, adversary in self.adv_losses.items():\n disc_loss = adversary.train_adv(y_pred, y)\n d_losses[f'd_{adv_name}'] = disc_loss\n metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))\n metrics.update(d_losses)\n\n balanced_losses: dict = {}\n other_losses: dict = {}\n\n # penalty from quantization\n if qres.penalty is not None and qres.penalty.requires_grad:\n other_losses['penalty'] = qres.penalty # penalty term from the quantizer\n\n # adversarial losses\n for adv_name, adversary in self.adv_losses.items():\n adv_loss, feat_loss = adversary(y_pred, y)\n balanced_losses[f'adv_{adv_name}'] = adv_loss\n balanced_losses[f'feat_{adv_name}'] = feat_loss\n\n # auxiliary losses\n for loss_name, criterion in self.aux_losses.items():\n loss = criterion(y_pred, 
y)\n balanced_losses[loss_name] = loss\n\n # weighted losses\n metrics.update(balanced_losses)\n metrics.update(other_losses)\n metrics.update(qres.metrics)\n\n if self.is_training:\n # backprop losses that are not handled by balancer\n other_loss = torch.tensor(0., device=self.device)\n if 'penalty' in other_losses:\n other_loss += other_losses['penalty']\n if other_loss.requires_grad:\n other_loss.backward(retain_graph=True)\n ratio1 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio1, torch.Tensor)\n metrics['ratio1'] = ratio1.sqrt()\n\n # balancer losses backward, returns effective training loss\n # with effective weights at the current batch.\n metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)\n # add metrics corresponding to weight ratios\n metrics.update(self.balancer.metrics)\n ratio2 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio2, torch.Tensor)\n metrics['ratio2'] = ratio2.sqrt()\n\n # optim\n flashy.distrib.sync_model(self.model)\n if self.cfg.optim.max_norm:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.cfg.optim.max_norm\n )\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # informative losses only\n info_losses: dict = {}\n with torch.no_grad():\n for loss_name, criterion in self.info_losses.items():\n loss = criterion(y_pred, y)\n info_losses[loss_name] = loss\n\n metrics.update(info_losses)\n\n # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups\n adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]\n if len(adv_losses) > 0:\n metrics['adv'] = torch.sum(torch.stack(adv_losses))\n feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]\n if len(feat_losses) > 0:\n metrics['feat'] = torch.sum(torch.stack(feat_losses))\n\n return metrics\n\n def run_epoch(self):\n # reset random seed at the beginning of the epoch\n self.rng = torch.Generator()\n self.rng.manual_seed(1234 + self.epoch)\n # run epoch\n super().run_epoch()\n\n def evaluate(self):\n \"\"\"Evaluate stage. 
Runs audio reconstruction evaluation.\"\"\"\n self.model.eval()\n evaluate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['evaluate']\n updates = len(loader)\n lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)\n average = flashy.averager()\n\n pendings = []\n ctx = multiprocessing.get_context('spawn')\n with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:\n for idx, batch in enumerate(lp):\n x = batch.to(self.device)\n with torch.no_grad():\n qres = self.model(x)\n\n y_pred = qres.x.cpu()\n y = batch.cpu() # should already be on CPU but just in case\n pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))\n\n metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)\n for pending in metrics_lp:\n metrics = pending.result()\n metrics = average(metrics)\n\n metrics = flashy.distrib.average_metrics(metrics, len(loader))\n return metrics\n\n def generate(self):\n \"\"\"Generate stage.\"\"\"\n self.model.eval()\n sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)\n generate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['generate']\n updates = len(loader)\n lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)\n\n for batch in lp:\n reference, _ = batch\n reference = reference.to(self.device)\n with torch.no_grad():\n qres = self.model(reference)\n assert isinstance(qres, quantization.QuantizedResult)\n\n reference = reference.cpu()\n estimate = qres.x.cpu()\n sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)\n\n flashy.distrib.barrier()\n\n def load_from_pretrained(self, name: str) -> dict:\n model = models.CompressionModel.get_pretrained(name)\n if isinstance(model, models.DAC):\n raise RuntimeError(\"Cannot fine tune a DAC model.\")\n elif isinstance(model, models.HFEncodecCompressionModel):\n self.logger.warning('Trying to automatically convert a HuggingFace model '\n 'to AudioCraft, this might fail!')\n state = model.model.state_dict()\n new_state = {}\n for k, v in state.items():\n if k.startswith('decoder.layers') and '.conv.' in k and '.block.' 
not in k:\n # We need to determine if this a convtr or a regular conv.\n layer = int(k.split('.')[2])\n if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):\n\n k = k.replace('.conv.', '.convtr.')\n k = k.replace('encoder.layers.', 'encoder.model.')\n k = k.replace('decoder.layers.', 'decoder.model.')\n k = k.replace('conv.', 'conv.conv.')\n k = k.replace('convtr.', 'convtr.convtr.')\n k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')\n k = k.replace('.codebook.', '._codebook.')\n new_state[k] = v\n state = new_state\n elif isinstance(model, models.EncodecModel):\n state = model.state_dict()\n else:\n raise RuntimeError(f\"Cannot fine tune model type {type(model)}.\")\n return {\n 'best_state': {'model': state}\n }\n\n @staticmethod\n def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a CompressionModel from a given checkpoint path or dora sig.\n This method is a convenient endpoint to load a CompressionModel to use in other solvers.\n\n Args:\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n This also supports pre-trained models by using a path of the form //pretrained/NAME.\n See `model_from_pretrained` for a list of supported pretrained models.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n checkpoint_path = str(checkpoint_path)\n if checkpoint_path.startswith('//pretrained/'):\n name = checkpoint_path.split('/', 3)[-1]\n return models.CompressionModel.get_pretrained(name, device)\n logger = logging.getLogger(__name__)\n logger.info(f\"Loading compression model from checkpoint: {checkpoint_path}\")\n _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)\n assert _checkpoint_path is not None, f\"Could not resolve compression model checkpoint path: {checkpoint_path}\"\n state = checkpoint.load_checkpoint(_checkpoint_path)\n assert state is not None and 'xp.cfg' in state, f\"Could not load compression model from ckpt: {checkpoint_path}\"\n cfg = state['xp.cfg']\n cfg.device = device\n compression_model = models.builders.get_compression_model(cfg).to(device)\n assert compression_model.sample_rate == cfg.sample_rate, \"Compression model sample rate should match\"\n\n assert 'best_state' in state and state['best_state'] != {}\n assert 'exported' not in state, \"When loading an exported checkpoint, use the //pretrained/ prefix.\"\n compression_model.load_state_dict(state['best_state']['model'])\n compression_model.eval()\n logger.info(\"Compression model loaded!\")\n return compression_model\n\n @staticmethod\n def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,\n checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.\n\n Args:\n cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)\n compression_model = 
models.builders.get_wrapped_compression_model(compression_model, cfg)\n return compression_model" }, { "identifier": "load_compression_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n if 'pretrained' in pkg:\n return CompressionModel.get_pretrained(pkg['pretrained'], device=device)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n model = builders.get_compression_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n return model" }, { "identifier": "load_lm_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n if cfg.device == 'cpu':\n cfg.dtype = 'float32'\n else:\n cfg.dtype = 'float16'\n _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')\n _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')\n _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')\n _delete_param(cfg, 'conditioners.args.drop_desc_p')\n model = builders.get_lm_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n model.cfg = cfg\n return model" }, { "identifier": "audio_write", "path": "audiocraft/data/audio.py", "snippet": "def audio_write(stem_name: tp.Union[str, Path],\n wav: torch.Tensor, sample_rate: int,\n format: str = 'wav', mp3_rate: int = 320, ogg_rate: tp.Optional[int] = None,\n normalize: bool = True, strategy: str = 'peak', peak_clip_headroom_db: float = 1,\n rms_headroom_db: float = 18, loudness_headroom_db: float = 14,\n loudness_compressor: bool = False,\n log_clipping: bool = True, make_parent_dir: bool = True,\n add_suffix: bool = True) -> Path:\n \"\"\"Convenience function for saving audio to disk. Returns the filename the audio was written to.\n\n Args:\n stem_name (str or Path): Filename without extension which will be added automatically.\n wav (torch.Tensor): Audio data to save.\n sample_rate (int): Sample rate of audio data.\n format (str): Either \"wav\", \"mp3\", \"ogg\", or \"flac\".\n mp3_rate (int): kbps when using mp3s.\n ogg_rate (int): kbps when using ogg/vorbis. If not provided, let ffmpeg decide for itself.\n normalize (bool): if `True` (default), normalizes according to the prescribed\n strategy (see after). If `False`, the strategy is only used in case clipping\n would happen.\n strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',\n i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square\n with extra headroom to avoid clipping. 'clip' just clips.\n peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.\n rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. 
This must be much larger\n than the `peak_clip` one to avoid further clipping.\n loudness_headroom_db (float): Target loudness for loudness normalization.\n loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.\n when strategy is 'loudness' log_clipping (bool): If True, basic logging on stderr when clipping still\n occurs despite strategy (only for 'rms').\n make_parent_dir (bool): Make parent directory if it doesn't exist.\n Returns:\n Path: Path of the saved audio.\n \"\"\"\n assert wav.dtype.is_floating_point, \"wav is not floating point\"\n if wav.dim() == 1:\n wav = wav[None]\n elif wav.dim() > 2:\n raise ValueError(\"Input wav should be at most 2 dimension.\")\n assert wav.isfinite().all()\n wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,\n rms_headroom_db, loudness_headroom_db, loudness_compressor,\n log_clipping=log_clipping, sample_rate=sample_rate,\n stem_name=str(stem_name))\n if format == 'mp3':\n suffix = '.mp3'\n flags = ['-f', 'mp3', '-c:a', 'libmp3lame', '-b:a', f'{mp3_rate}k']\n elif format == 'wav':\n suffix = '.wav'\n flags = ['-f', 'wav', '-c:a', 'pcm_s16le']\n elif format == 'ogg':\n suffix = '.ogg'\n flags = ['-f', 'ogg', '-c:a', 'libvorbis']\n if ogg_rate is not None:\n flags += ['-b:a', f'{ogg_rate}k']\n elif format == 'flac':\n suffix = '.flac'\n flags = ['-f', 'flac']\n else:\n raise RuntimeError(f\"Invalid format {format}. Only wav or mp3 are supported.\")\n if not add_suffix:\n suffix = ''\n path = Path(str(stem_name) + suffix)\n if make_parent_dir:\n path.parent.mkdir(exist_ok=True, parents=True)\n try:\n _piping_to_ffmpeg(path, wav, sample_rate, flags)\n except Exception:\n if path.exists():\n # we do not want to leave half written files around.\n path.unlink()\n raise\n return path" }, { "identifier": "get_lm_model", "path": "audiocraft/models/builders.py", "snippet": "def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:\n \"\"\"Instantiate a transformer LM.\"\"\"\n if cfg.lm_model == 'transformer_lm':\n kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))\n n_q = kwargs['n_q']\n q_modeling = kwargs.pop('q_modeling', None)\n codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')\n attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))\n cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))\n cfg_prob, cfg_coef = cls_free_guidance['training_dropout'], cls_free_guidance['inference_coef']\n fuser = get_condition_fuser(cfg)\n condition_provider = get_conditioner_provider(kwargs[\"dim\"], cfg).to(cfg.device)\n if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically\n kwargs['cross_attention'] = True\n if codebooks_pattern_cfg.modeling is None:\n assert q_modeling is not None, \\\n \"LM model should either have a codebook pattern defined or transformer_lm.q_modeling\"\n codebooks_pattern_cfg = omegaconf.OmegaConf.create(\n {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}\n )\n pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)\n return LMModel(\n pattern_provider=pattern_provider,\n condition_provider=condition_provider,\n fuser=fuser,\n cfg_dropout=cfg_prob,\n cfg_coef=cfg_coef,\n attribute_dropout=attribute_dropout,\n dtype=getattr(torch, cfg.dtype),\n device=cfg.device,\n **kwargs\n ).to(cfg.device)\n else:\n raise KeyError(f\"Unexpected LM model {cfg.lm_model}\")" } ]
import os import random import torchaudio import typing as tp import numpy as np import torch import subprocess from typing import Optional from cog import BasePredictor, Input, Path from audiocraft.solvers.compression import CompressionSolver from audiocraft.models import MusicGen, MultiBandDiffusion from audiocraft.solvers.compression import CompressionSolver from audiocraft.models.loaders import ( load_compression_model, load_lm_model, ) from audiocraft.data.audio import audio_write from audiocraft.models.builders import get_lm_model from omegaconf import OmegaConf
20,448
if audio_start > audio_end: raise ValueError( "`audio_start` must be less than or equal to `audio_end`" ) audio_chords = audio_chords[ ..., int(sr * audio_start) : int(sr * audio_end) ] wav, tokens = model.generate_with_chroma(['the intro of ' + prompt], audio_chords[...,:30*sr], sr, progress=True, return_tokens=True) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) for i in range(int((duration - overlap) // sub_duration) - 1): wav, tokens = model.generate_continuation_with_audio_tokens_and_audio_chroma( prompt=tokens[...,sub_duration*encodec_rate:], melody_wavs = audio_chords[...,sub_duration*(i+1)*sr:(sub_duration*(i+1)+30)*sr], melody_sample_rate=sr, descriptions=['chorus of ' + prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) if int(duration - overlap) % sub_duration != 0: set_generation_params(overlap + ((duration - overlap) % sub_duration)) wav, tokens = model.generate_continuation_with_audio_tokens_and_audio_chroma( prompt=tokens[...,sub_duration*encodec_rate:], melody_wavs = audio_chords[...,sub_duration*(len(wavs))*sr:], melody_sample_rate=sr, descriptions=['the outro of ' + prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) else: # Case 3 wav, tokens = model.generate_with_text_chroma(descriptions = [prompt], chord_texts = [text_chords], bpm = [bpm], meter = [int(time_sig.split('/')[0])], progress=True, return_tokens=True) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) for i in range((duration - 10) // sub_duration - 1): model.lm.condition_provider.conditioners['self_wav'].set_continuation_count(sub_duration/30, i) wav, tokens = model.generate_continuation_with_audio_tokens_and_text_chroma( tokens[...,sub_duration*encodec_rate:], [prompt], [text_chords], bpm=[bpm], meter=[int(time_sig.split('/')[0])], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) if (duration - overlap) % sub_duration != 0: model.lm.condition_provider.conditioners['self_wav'].set_continuation_count(sub_duration/30, i+1) set_generation_params(sub_duration + ((duration - overlap) % sub_duration)) wav, tokens = model.generate_continuation_with_audio_tokens_and_text_chroma( tokens[...,sub_duration*encodec_rate:], [prompt], [text_chords], bpm=[bpm], meter=[int(time_sig.split('/')[0])], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) wav = wavs[0][...,:sub_duration*wav_sr] for i in range(len(wavs)-1): if i == len(wavs)-2: wav = torch.concat([wav,wavs[i+1]],dim=-1) else: wav = torch.concat([wav,wavs[i+1][...,:sub_duration*wav_sr]],dim=-1) wav = wav.cpu() else: ''' if not audio_chords: set_generation_params(duration) if text_chords is None or text_chords == '': # Case 4 wav, tokens = model.generate([prompt], progress=True, return_tokens=True) else: # Case 5 wav, tokens = model.generate_with_text_chroma(descriptions = [prompt], chord_texts = [text_chords], bpm = [bpm], meter = [int(time_sig.split('/')[0])], progress=True, return_tokens=True) else: audio_chords, sr = torchaudio.load(audio_chords) audio_chords = audio_chords[None] if audio_chords.dim() == 2 else audio_chords audio_start = 0 if not audio_start else audio_start if audio_end is None or audio_end == -1: audio_end = 
audio_chords.shape[2] / sr if audio_start > audio_end: raise ValueError( "`audio_start` must be less than or equal to `audio_end`" ) audio_chords_wavform = audio_chords[ ..., int(sr * audio_start) : int(sr * audio_end) ] audio_chords_duration = audio_chords_wavform.shape[-1] / sr if continuation: set_generation_params(duration) if text_chords is None or text_chords == '': # Case 6 wav, tokens = model.generate_continuation( prompt=audio_chords_wavform, prompt_sample_rate=sr, descriptions=[prompt], progress=True, return_tokens=True ) else: # Case 7 wav, tokens = model.generate_continuation_with_text_chroma( audio_chords_wavform, sr, [prompt], [text_chords], bpm=[bpm], meter=[int(time_sig.split('/')[0])], progress=True, return_tokens=True ) else: # Case 8 set_generation_params(duration) wav, tokens = model.generate_with_chroma( [prompt], audio_chords_wavform, sr, progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens)
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu" self.mbd = MultiBandDiffusion.get_mbd_musicgen() if str(weights) == "weights": weights = None if weights is not None: print("Fine-tuned model weights loaded!") self.model = load_ckpt(weights, self.device, url=True) def _load_model( self, model_path: str, cls: Optional[any] = None, load_args: Optional[dict] = {}, model_id: Optional[str] = None, device: Optional[str] = None, ) -> MusicGen: if device is None: device = self.device compression_model = load_compression_model( model_id, device=device, cache_dir=model_path ) lm = load_lm_model(model_id, device=device, cache_dir=model_path) return MusicGen(model_id, compression_model, lm) def predict( self, model_version: str = Input( description="Model type. Select `fine-tuned` if you trained the model into your own repository.", default="stereo-chord-large", choices=["chord", "chord-large", "stereo-chord", "stereo-chord-large", "fine-tuned"] ), prompt: str = Input( description="A description of the music you want to generate.", default=None ), text_chords: str = Input( description="A text based chord progression condition. Single uppercase alphabet character(eg. `C`) is considered as a major chord. Chord attributes like(`maj`, `min`, `dim`, `aug`, `min6`, `maj6`, `min7`, `minmaj7`, `maj7`, `7`, `dim7`, `hdim7`, `sus2` and `sus4`) can be added to the root alphabet character after `:`.(eg. `A:min7`) Each chord token splitted by `SPACE` is allocated to a single bar. If more than one chord must be allocated to a single bar, cluster the chords adding with `,` without any `SPACE`.(eg. `C,C:7 G, E:min A:min`) You must choose either only one of `audio_chords` below or `text_chords`.", default=None ), bpm: float = Input( description="BPM condition for the generated output. `text_chords` will be processed based on this value. This will be appended at the end of `prompt`.", default=None ), time_sig: str = Input( description="Time signature value for the generate output. 
`text_chords` will be processed based on this value. This will be appended at the end of `prompt`.", default="4/4" ), audio_chords: Path = Input( description="An audio file that will condition the chord progression. You must choose only one among `audio_chords` or `text_chords` above.", default=None, ), audio_start: int = Input( description="Start time of the audio file to use for chord conditioning.", default=0, ge=0, ), audio_end: int = Input( description="End time of the audio file to use for chord conditioning. If None, will default to the end of the audio clip.", default=None, ge=0, ), duration: int = Input( description="Duration of the generated audio in seconds.", default=8 ), continuation: bool = Input( description="If `True`, generated music will continue from `audio_chords`. If chord conditioning, this is only possible when the chord condition is given with `text_chords`. If `False`, generated music will mimic `audio_chords`'s chord.", default=False, ), # continuation_start: int = Input( # description="Start time of the audio file to use for continuation.", # default=0, # ge=0, # ), # continuation_end: int = Input( # description="End time of the audio file to use for continuation. If -1 or None, will default to the end of the audio clip.", # default=None, # ge=0, # ), multi_band_diffusion: bool = Input( description="If `True`, the EnCodec tokens will be decoded with MultiBand Diffusion. Not compatible with stereo models.", default=False, ), normalization_strategy: str = Input( description="Strategy for normalizing audio.", default="loudness", choices=["loudness", "clip", "peak", "rms"], ), chroma_coefficient: float = Input( description="Coefficient value multiplied to multi-hot chord chroma.", default=1.0, ge=0.5, le=2.5 ), top_k: int = Input( description="Reduces sampling to the k most likely tokens.", default=250 ), top_p: float = Input( description="Reduces sampling to tokens with cumulative probability of p. When set to `0` (default), top_k sampling is used.", default=0.0, ), temperature: float = Input( description="Controls the 'conservativeness' of the sampling process. Higher temperature means more diversity.", default=1.0, ), classifier_free_guidance: int = Input( description="Increases the influence of inputs on the output. Higher values produce lower-varience outputs that adhere more closely to inputs.", default=3, ), output_format: str = Input( description="Output format for generated audio.", default="wav", choices=["wav", "mp3"], ), seed: int = Input( description="Seed for random number generator. 
If `None` or `-1`, a random seed will be used.", default=None, ), ) -> Path: if text_chords == '': text_chords = None if text_chords and audio_chords and not continuation: raise ValueError("Must provide either only one of `audio_chords` or `text_chords`.") if text_chords and not bpm: raise ValueError("There must be `bpm` value set when text based chord conditioning.") if text_chords and (not time_sig or time_sig==""): raise ValueError("There must be `time_sig` value set when text based chord conditioning.") if continuation and not audio_chords: raise ValueError("Must provide an audio input file via `audio_chords` if continuation is `True`.") if multi_band_diffusion and int(self.model.lm.cfg.transformer_lm.n_q) == 8: raise ValueError("Multi-band Diffusion only works with non-stereo models.") if prompt is None: prompt = '' if time_sig is not None and not time_sig == '': if prompt == '': prompt = time_sig else: prompt = prompt + ', ' + time_sig if bpm is not None: if prompt == '': prompt = str(bpm) else: prompt = prompt + f', bpm : {bpm}' if model_version == "fine-tuned": try: self.model except AttributeError: raise Exception("ERROR: Fine-tuned weights don't exist! Is the model trained from `sakemin/musicgen-chord`? If not, set `model_version` from `chord`, `chord-large`, `stereo-chord` and `stereo-chord-large`.") else: if os.path.isfile(f'musicgen-{model_version}.th'): pass else: url = f"https://weights.replicate.delivery/default/musicgen-chord/musicgen-{model_version}.th" dest = f"musicgen-{model_version}.th" subprocess.check_call(["pget", url, dest], close_fds=False) self.model = load_ckpt(f'musicgen-{model_version}.th', self.device) self.model.lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True model = self.model set_generation_params = lambda duration: model.set_generation_params( duration=duration, top_k=top_k, top_p=top_p, temperature=temperature, cfg_coef=classifier_free_guidance, ) model.lm.condition_provider.conditioners['self_wav'].chroma_coefficient = chroma_coefficient if not seed or seed == -1: seed = torch.seed() % 2 ** 32 - 1 set_all_seeds(seed) set_all_seeds(seed) print(f"Using seed {seed}") ''' if duration > 30: encodec_rate = 50 sub_duration=25 overlap = 30 - sub_duration wavs = [] wav_sr = model.sample_rate set_generation_params(30) if (text_chords is None) and audio_chords is None: # Case 1 wav, tokens = model.generate([prompt], progress=True, return_tokens=True) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) for i in range((duration - overlap) // sub_duration - 1): wav, tokens= model.generate_continuation_with_audio_token( prompt=tokens[...,sub_duration*encodec_rate:], descriptions=[prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) if (duration - overlap) % sub_duration != 0: set_generation_params(overlap + ((duration - overlap) % sub_duration)) wav, tokens = model.generate_continuation_with_audio_token( prompt=tokens[...,sub_duration*encodec_rate:], descriptions=[prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) elif (text_chords is None or text_chords == '') and audio_chords is not None: # Case 2 audio_chords, sr = torchaudio.load(audio_chords) audio_chords = audio_chords[None] if audio_chords.dim() == 2 else audio_chords audio_start = 0 if not audio_start else audio_start if audio_end is None or audio_end == -1: audio_end = 
audio_chords.shape[-1] / sr if audio_start > audio_end: raise ValueError( "`audio_start` must be less than or equal to `audio_end`" ) audio_chords = audio_chords[ ..., int(sr * audio_start) : int(sr * audio_end) ] wav, tokens = model.generate_with_chroma(['the intro of ' + prompt], audio_chords[...,:30*sr], sr, progress=True, return_tokens=True) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) for i in range(int((duration - overlap) // sub_duration) - 1): wav, tokens = model.generate_continuation_with_audio_tokens_and_audio_chroma( prompt=tokens[...,sub_duration*encodec_rate:], melody_wavs = audio_chords[...,sub_duration*(i+1)*sr:(sub_duration*(i+1)+30)*sr], melody_sample_rate=sr, descriptions=['chorus of ' + prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) if int(duration - overlap) % sub_duration != 0: set_generation_params(overlap + ((duration - overlap) % sub_duration)) wav, tokens = model.generate_continuation_with_audio_tokens_and_audio_chroma( prompt=tokens[...,sub_duration*encodec_rate:], melody_wavs = audio_chords[...,sub_duration*(len(wavs))*sr:], melody_sample_rate=sr, descriptions=['the outro of ' + prompt], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) else: # Case 3 wav, tokens = model.generate_with_text_chroma(descriptions = [prompt], chord_texts = [text_chords], bpm = [bpm], meter = [int(time_sig.split('/')[0])], progress=True, return_tokens=True) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) for i in range((duration - 10) // sub_duration - 1): model.lm.condition_provider.conditioners['self_wav'].set_continuation_count(sub_duration/30, i) wav, tokens = model.generate_continuation_with_audio_tokens_and_text_chroma( tokens[...,sub_duration*encodec_rate:], [prompt], [text_chords], bpm=[bpm], meter=[int(time_sig.split('/')[0])], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) if (duration - overlap) % sub_duration != 0: model.lm.condition_provider.conditioners['self_wav'].set_continuation_count(sub_duration/30, i+1) set_generation_params(sub_duration + ((duration - overlap) % sub_duration)) wav, tokens = model.generate_continuation_with_audio_tokens_and_text_chroma( tokens[...,sub_duration*encodec_rate:], [prompt], [text_chords], bpm=[bpm], meter=[int(time_sig.split('/')[0])], progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens) wavs.append(wav.detach().cpu()) wav = wavs[0][...,:sub_duration*wav_sr] for i in range(len(wavs)-1): if i == len(wavs)-2: wav = torch.concat([wav,wavs[i+1]],dim=-1) else: wav = torch.concat([wav,wavs[i+1][...,:sub_duration*wav_sr]],dim=-1) wav = wav.cpu() else: ''' if not audio_chords: set_generation_params(duration) if text_chords is None or text_chords == '': # Case 4 wav, tokens = model.generate([prompt], progress=True, return_tokens=True) else: # Case 5 wav, tokens = model.generate_with_text_chroma(descriptions = [prompt], chord_texts = [text_chords], bpm = [bpm], meter = [int(time_sig.split('/')[0])], progress=True, return_tokens=True) else: audio_chords, sr = torchaudio.load(audio_chords) audio_chords = audio_chords[None] if audio_chords.dim() == 2 else audio_chords audio_start = 0 if not audio_start else audio_start if audio_end is None or audio_end == -1: 
audio_end = audio_chords.shape[2] / sr if audio_start > audio_end: raise ValueError( "`audio_start` must be less than or equal to `audio_end`" ) audio_chords_wavform = audio_chords[ ..., int(sr * audio_start) : int(sr * audio_end) ] audio_chords_duration = audio_chords_wavform.shape[-1] / sr if continuation: set_generation_params(duration) if text_chords is None or text_chords == '': # Case 6 wav, tokens = model.generate_continuation( prompt=audio_chords_wavform, prompt_sample_rate=sr, descriptions=[prompt], progress=True, return_tokens=True ) else: # Case 7 wav, tokens = model.generate_continuation_with_text_chroma( audio_chords_wavform, sr, [prompt], [text_chords], bpm=[bpm], meter=[int(time_sig.split('/')[0])], progress=True, return_tokens=True ) else: # Case 8 set_generation_params(duration) wav, tokens = model.generate_with_chroma( [prompt], audio_chords_wavform, sr, progress=True, return_tokens=True ) if multi_band_diffusion: wav = self.mbd.tokens_to_wav(tokens)
audio_write(
6
2023-10-09 09:52:24+00:00
24k
zhijie-group/LOVECon
test_lovecon.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n mid_block_type: str = \"UNetMidBlockPseudo3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n **kwargs\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n \n # input\n self.conv_in = PseudoConv3d(in_channels, block_out_channels[0], \n kernel_size=3, padding=(1, 1), model_config=kwargs)\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n kwargs_copy=copy.deepcopy(kwargs)\n temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n and (not is_final_block))\n kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n # kwargs_copy.update({'SparseCausalAttention_index': temporal_downsample_i} )\n if temporal_downsample_i:\n print(f'Initialize model temporal downsample at layer {i}')\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n # mid\n if mid_block_type == \"UNetMidBlockPseudo3DCrossAttn\":\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n model_config=kwargs\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n \n kwargs_copy=copy.deepcopy(kwargs)\n kwargs_copy.update({'temporal_downsample': \n i < (self.temporal_downsample_time-1)})\n if i < (self.temporal_downsample_time-1):\n print(f'Initialize model temporal updample at layer {i}')\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n self.conv_out = PseudoConv3d(block_out_channels[0], out_channels, \n kernel_size=3, padding=1, model_config=kwargs)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input 
tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = (\n num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n )\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(\n module,\n (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D, CrossAttnUpBlockPseudo3D, UpBlockPseudo3D),\n ):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None, # None\n attention_mask: Optional[torch.Tensor] = None, # None\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNetPseudo3DConditionOutput, Tuple]:\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when 
sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None: # None\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample: # False\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n new_down_block_res_samples += (down_block_res_sample + down_block_additional_residual,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n # for i in down_block_res_samples: print(i.shape) \n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n \n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n )\n # 6. post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNetPseudo3DConditionOutput(sample=sample)\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n config[\"up_block_types\"] = [convert_2d_to_3d_block(block) for block in config[\"up_block_types\"]]\n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n 
continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "ControlNetPseudo3DModel", "path": "video_diffusion/models/controlnet_3d_condition.py", "snippet": "class ControlNetPseudo3DModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n **kwargs\n ):\n super().__init__()\n\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n # self.conv_in = PseudoConv3d(\n # in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n # )\n self.conv_in = InflatedConv3d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetPseudo3DConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n #non temperal \n # kwargs_copy=copy.deepcopy(kwargs)\n # temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n # and (not is_final_block))\n # kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n 
cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n # model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n # controlnet_block = PseudoConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n # model_config=kwargs\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetPseudo3DOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n \n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb)\n\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n # print(sample.shape,controlnet_cond.shape)\n sample += controlnet_cond\n \n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n\n # 5. 
Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetPseudo3DOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, control_temporal_idx=None, control_mid_temporal=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\"\n ]\n # config[\"control_temporal_idx\"] = control_temporal_idx\n # config[\"control_mid_temporal\"] = control_mid_temporal\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n\n state_dict = torch.load(model_file, map_location=\"cpu\")\n for k, v in model.state_dict().items():\n if '_temp.' 
in k:\n if 'conv' in k:\n state_dict.update({k: v})\n else:\n copyk = k\n copyk = copyk.replace('_temp.', '1.')\n state_dict.update({k: state_dict[copyk]})\n model.load_state_dict(state_dict)\n\n return model\n\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n \n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "ImageSequenceDataset", "path": "video_diffusion/data/dataset.py", "snippet": "class ImageSequenceDataset(Dataset):\n def __init__(\n self,\n path: str,\n prompt_ids: torch.Tensor,\n prompt: str,\n start_sample_frame: int=0,\n n_sample_frame: int = 8,\n sampling_rate: int = 1,\n stride: int = -1, # only used during tuning to sample a long video\n image_mode: str = \"RGB\",\n image_size: int = 512,\n crop: str = \"center\",\n \n class_data_root: str = None,\n class_prompt_ids: torch.Tensor = None,\n \n offset: dict = {\n \"left\": 0,\n \"right\": 0,\n \"top\": 0,\n \"bottom\": 0\n },\n **args\n \n ):\n self.path = path\n self.images = self.get_image_list(path)\n self.n_images = len(self.images)\n self.offset = offset\n self.start_sample_frame = start_sample_frame\n if n_sample_frame < 0:\n n_sample_frame = len(self.images) \n self.n_sample_frame = n_sample_frame\n # local sampling rate from the video\n self.sampling_rate = sampling_rate\n\n self.sequence_length = (n_sample_frame - 1) * sampling_rate + 1\n if self.n_images < self.sequence_length:\n raise ValueError(f\"self.n_images {self.n_images } < self.sequence_length {self.sequence_length}: Required number of frames {self.sequence_length} larger than total frames in the dataset {self.n_images }\")\n \n # During tuning if video is too long, we sample the long video every self.stride globally\n self.stride = stride if stride > 0 else (self.n_images+1)\n self.video_len = (self.n_images - self.sequence_length) // self.stride + 1\n\n self.image_mode = image_mode\n self.image_size 
= image_size\n crop_methods = {\n \"center\": center_crop,\n \"random\": random_crop,\n }\n if crop not in crop_methods:\n raise ValueError\n self.crop = crop_methods[crop]\n\n self.prompt = prompt\n self.prompt_ids = prompt_ids\n # Negative prompt for regularization to avoid overfitting during one-shot tuning\n if class_data_root is not None:\n self.class_data_root = Path(class_data_root)\n self.class_images_path = sorted(list(self.class_data_root.iterdir()))\n self.num_class_images = len(self.class_images_path)\n self.class_prompt_ids = class_prompt_ids\n \n \n def __len__(self):\n max_len = (self.n_images - self.sequence_length) // self.stride + 1\n \n if hasattr(self, 'num_class_images'):\n max_len = max(max_len, self.num_class_images)\n \n return max_len\n\n def __getitem__(self, index):\n return_batch = {}\n frame_indices = self.get_frame_indices(index%self.video_len)\n frames = [self.load_frame(i) for i in frame_indices]\n frames = self.transform(frames)\n\n return_batch.update(\n {\n \"images\": frames,\n \"prompt_ids\": self.prompt_ids,\n }\n )\n\n if hasattr(self, 'class_data_root'):\n class_index = index % (self.num_class_images - self.n_sample_frame)\n class_indices = self.get_class_indices(class_index) \n frames = [self.load_class_frame(i) for i in class_indices]\n return_batch[\"class_images\"] = self.tensorize_frames(frames)\n return_batch[\"class_prompt_ids\"] = self.class_prompt_ids\n return return_batch\n \n def transform(self, frames):\n frames = self.tensorize_frames(frames)\n frames = offset_crop(frames, **self.offset)\n frames = short_size_scale(frames, size=self.image_size)\n frames = self.crop(frames, height=self.image_size, width=self.image_size)\n return frames\n\n @staticmethod\n def tensorize_frames(frames):\n frames = rearrange(np.stack(frames), \"f h w c -> c f h w\")\n return torch.from_numpy(frames).div(255) * 2 - 1\n\n def load_frame(self, index):\n image_path = os.path.join(self.path, self.images[index])\n return Image.open(image_path).convert(self.image_mode)\n\n def load_class_frame(self, index):\n image_path = self.class_images_path[index]\n return Image.open(image_path).convert(self.image_mode)\n\n def get_frame_indices(self, index):\n if self.start_sample_frame is not None:\n frame_start = self.start_sample_frame + self.stride * index\n else:\n frame_start = self.stride * index\n return (frame_start + i * self.sampling_rate for i in range(self.n_sample_frame))\n\n def get_class_indices(self, index):\n frame_start = index\n return (frame_start + i for i in range(self.n_sample_frame))\n\n @staticmethod\n def get_image_list(path):\n images = []\n for file in sorted(os.listdir(path)):\n if file.endswith(IMAGE_EXTENSION):\n images.append(file)\n return images" }, { "identifier": "get_time_string", "path": "video_diffusion/common/util.py", "snippet": "def get_time_string() -> str:\n x = datetime.datetime.now()\n return f\"{(x.year - 2000):02d}{x.month:02d}{x.day:02d}-{x.hour:02d}{x.minute:02d}{x.second:02d}\"" }, { "identifier": "get_function_args", "path": "video_diffusion/common/util.py", "snippet": "def get_function_args() -> Dict:\n frame = sys._getframe(1)\n args, _, _, values = inspect.getargvalues(frame)\n args_dict = copy.deepcopy({arg: values[arg] for arg in args})\n\n return args_dict" }, { "identifier": "get_logger_config_path", "path": "video_diffusion/common/logger.py", "snippet": "def get_logger_config_path(logdir):\n # accelerate handles the logger in multiprocessing\n logger = get_logger(__name__)\n logging.basicConfig(\n level=logging.INFO, 
\n format='%(asctime)s:%(levelname)s : %(message)s', \n datefmt='%a, %d %b %Y %H:%M:%S', \n filename=os.path.join(logdir, 'log.log'),\n filemode='w')\n chlr = logging.StreamHandler()\n chlr.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s : %(message)s'))\n logger.logger.addHandler(chlr)\n return logger" }, { "identifier": "log_train_samples", "path": "video_diffusion/common/image_util.py", "snippet": "def log_train_samples(\n train_dataloader,\n save_path,\n num_batch: int = 4,\n):\n train_samples = []\n for idx, batch in enumerate(train_dataloader):\n if idx >= num_batch:\n break\n train_samples.append(batch[\"images\"])\n\n train_samples = torch.cat(train_samples).numpy()\n train_samples = rearrange(train_samples, \"b c f h w -> b f h w c\")\n train_samples = (train_samples * 0.5 + 0.5).clip(0, 1)\n train_samples = numpy_batch_seq_to_pil(train_samples)\n train_samples = [make_grid(images, cols=int(np.ceil(np.sqrt(len(train_samples))))) for images in zip(*train_samples)]\n # save_images_as_gif(train_samples, save_path)\n save_gif_mp4_folder_type(train_samples, save_path)" }, { "identifier": "instantiate_from_config", "path": "video_diffusion/common/instantiate_from_config.py", "snippet": "def instantiate_from_config(config:dict, **args_from_code):\n \"\"\"Util funciton to decompose differenct modules using config\n\n Args:\n config (dict): with key of \"target\" and \"params\", better from yaml\n static \n args_from_code: additional con\n\n\n Returns:\n a validation/training pipeline, a module\n \"\"\"\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()), **args_from_code)" }, { "identifier": "P2pSampleLogger", "path": "video_diffusion/pipelines/p2p_validation_loop_controlnet.py", "snippet": "class P2pSampleLogger:\n def __init__(\n self,\n editing_prompts: List[str],\n clip_length: int,\n logdir: str,\n subdir: str = \"sample\",\n num_samples_per_prompt: int = 1,\n sample_seeds: List[int] = None,\n num_inference_steps: int = 20,\n guidance_scale: float = 7,\n strength: float = None,\n annotate: bool = False,\n annotate_size: int = 15,\n use_make_grid: bool = True,\n grid_column_size: int = 2,\n prompt2prompt_edit: bool=False,\n p2p_config: dict = None,\n use_inversion_attention: bool = True,\n source_prompt: str = None,\n traverse_p2p_config: bool = False,\n **args\n ) -> None:\n self.editing_prompts = editing_prompts\n self.clip_length = clip_length\n self.guidance_scale = guidance_scale\n self.num_inference_steps = num_inference_steps\n self.strength = strength\n\n if sample_seeds is None:\n max_num_samples_per_prompt = int(1e5)\n if num_samples_per_prompt > max_num_samples_per_prompt:\n raise ValueError\n sample_seeds = torch.randint(0, max_num_samples_per_prompt, (num_samples_per_prompt,))\n sample_seeds = sorted(sample_seeds.numpy().tolist())\n self.sample_seeds = sample_seeds\n\n self.logdir = os.path.join(logdir, subdir)\n os.makedirs(self.logdir)\n\n self.annotate = annotate\n self.annotate_size = annotate_size\n self.make_grid = use_make_grid\n self.grid_column_size = grid_column_size\n self.prompt2prompt_edit = prompt2prompt_edit\n self.p2p_config = p2p_config\n self.use_inversion_attention = use_inversion_attention\n self.source_prompt = source_prompt\n self.traverse_p2p_config =traverse_p2p_config\n\n def log_sample_images(\n self, pipeline: 
DiffusionPipeline,\n device: torch.device, step: int,\n image: Union[torch.FloatTensor, PIL.Image.Image] = None,\n control_image: torch.FloatTensor = None,\n latents: torch.FloatTensor = None,\n mask:torch.FloatTensor = None,\n editing_type:str = \"attribute\",\n uncond_embeddings_list: List[torch.FloatTensor] = None,\n save_dir = None,\n duration = 100,\n fps = 10,\n use_interpolater = True\n ):\n torch.cuda.empty_cache()\n samples_all = []\n attention_all = []\n # handle input image\n if image is not None:\n input_pil_images = pipeline.numpy_to_pil(tensor_to_numpy(image))[0]\n if self.annotate :\n samples_all.append([\n annotate_image(image, \"input sequence\", font_size=self.annotate_size) for image in input_pil_images\n ])\n else:\n samples_all.append(input_pil_images)\n if isinstance(self.editing_prompts,str):\n self.editing_prompts = [self.editing_prompts]\n for idx, prompt in enumerate(tqdm(self.editing_prompts, desc=\"Generating sample images\")):\n # if self.prompt2prompt_edit:\n # if self.traverse_p2p_config:\n # p2p_config_now = copy.deepcopy(self.p2p_config[idx])\n # else:\n # p2p_config_now = copy.deepcopy(self.p2p_config[idx])\n\n # if idx == 0 and not self.use_inversion_attention:\n # edit_type = 'save'\n # p2p_config_now.update({'save_self_attention': True})\n # print('Reflash the attention map in pipeline')\n\n # else:\n # edit_type = 'swap'\n # p2p_config_now.update({'save_self_attention': False})\n\n # p2p_config_now.update({'use_inversion_attention': self.use_inversion_attention})\n # else:\n # edit_type = None\n\n input_prompt = prompt\n\n # generator = torch.Generator(device=device)\n # generator.manual_seed(seed)\n generator = None\n sequence = []\n window = 8\n window = min(window,self.clip_length)\n start_frame = 0\n end_frame = window\n patch_index = 0\n while start_frame < self.clip_length:\n torch.cuda.empty_cache()\n if patch_index == 0:\n sequence_return = pipeline(\n prompt=input_prompt,\n source_prompt = self.editing_prompts[0] if self.source_prompt is None else self.source_prompt,\n # edit_type = edit_type,\n image=image[[0] + [0] + list(range(start_frame,min(self.clip_length,end_frame))),], # torch.Size([8, 3, 512, 512])\n strength=self.strength,\n generator=generator,\n # window = 1,\n num_inference_steps=self.num_inference_steps,\n guidance_scale=self.guidance_scale,\n num_images_per_prompt=1,\n # used in null inversion\n editing_type = editing_type,\n latents = [timestep_latent[:, :,[0] + [0] + list(range(start_frame,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n mask = mask[:,:, [0] + [0] + list(range(start_frame, min(self.clip_length,end_frame))),] if mask is not None else None,\n # latents = [timestep_latent[:, :,list(range(start_frame,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n # mask = mask[:,:, list(range(start_frame, min(self.clip_length,end_frame))),] if mask is not None else None,\n uncond_embeddings_list = uncond_embeddings_list,\n save_path = save_dir,\n # **p2p_config_now,\n )\n else:\n sequence_return = pipeline(\n prompt=input_prompt,\n reference_global_latents = reference_global_latents,\n reference_latents = reference_latents,\n source_prompt = self.editing_prompts[0] if self.source_prompt is None else self.source_prompt,\n # edit_type = edit_type,\n image=image[[0] + list(range(start_frame - 1,min(self.clip_length,end_frame))),], # torch.Size([8, 3, 512, 512])\n strength=self.strength,\n generator=generator,\n # window = window,\n 
num_inference_steps=self.num_inference_steps,\n guidance_scale=self.guidance_scale,\n num_images_per_prompt=1,\n # used in null inversion\n editing_type = editing_type,\n latents = [timestep_latent[:, :,[0] + list(range(start_frame-1,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n mask = mask[:,:, [0] + list(range(start_frame-1, min(self.clip_length,end_frame))),] if mask is not None else None,\n # latents = [timestep_latent[:, :,list(range(start_frame,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n # mask = mask[:,:, list(range(start_frame, min(self.clip_length,end_frame))),] if mask is not None else None,\n uncond_embeddings_list = uncond_embeddings_list,\n save_path = save_dir,\n # **p2p_config_now,\n )\n start_frame = end_frame\n end_frame = end_frame + window\n if patch_index == 0:\n reference_global_latents = sequence_return['reference_global_latents']\n reference_latents = sequence_return['reference_latents']\n patch_index = patch_index + 1\n # if self.prompt2prompt_edit:\n # sequence_temp = sequence_return['sdimage_output'].images[0]\n # # attention_output = sequence_return['attention_output']\n # else:\n # sequence_temp = sequence_return.images[0]\n sequence_temp = sequence_return['sdimage_output'].images[0]\n sequence = sequence + sequence_temp\n torch.cuda.empty_cache()\n # sequence = torch.cat(sequence,dim = 2)\n\n if self.annotate:\n images = [\n annotate_image(image, prompt, font_size=self.annotate_size) for image in sequence\n ]\n else:\n images = sequence\n control_images = []\n for i in range(control_image.shape[2]):\n control_images.append(Image.fromarray((control_image[0,:,i]*255).cpu().numpy().transpose(1,2,0).astype(np.uint8)))\n #smoother start\n if use_interpolater:\n for i in range(len(images)):\n images[i] = np.array(images[i]).transpose(2,0,1)[None:]/255\n frames = torch.from_numpy(np.stack(images, axis= 0)).cuda()\n f, C, H, W = frames.shape\n ph = ((H - 1) // 32 + 1) * 32\n pw = ((W - 1) // 32 + 1) * 32\n padding = (0, pw - W, 0, ph - H)\n frames = F.pad(frames,padding)\n smoother = Model()\n smoother.load_model('RIFEModel', -1)\n print('using smoother')\n with torch.no_grad():\n for i in range(f - 2):\n img0 = frames[i:i+1].float()\n img1 = frames[i+2:i+3].float()\n mid = smoother.inference(img0,img1)\n mid_padded = F.pad(mid,padding)\n frames[i+1:i+2,] = (frames[i+1:i+2,] + mid_padded[None:])/2\n torch.cuda.empty_cache()\n images = []\n for i in range(len(frames)):\n images.append(Image.fromarray((frames[i] * 255).cpu().numpy().astype(np.uint8).transpose(1,2,0)))\n # smoother end\n if self.make_grid:\n samples_all.append(control_images)\n samples_all.append(images)\n # if self.prompt2prompt_edit:\n # if attention_output is not None:\n # attention_all.append(attention_output)\n\n save_path = os.path.join(self.logdir, f\"step_{step}_{idx}.gif\")\n save_gif_mp4_folder_type(images, save_path,duration = duration,fps = fps)\n\n # if self.prompt2prompt_edit:\n\n # if attention_output is not None:\n # save_gif_mp4_folder_type(attention_output, save_path.replace('.gif', 'atten.gif'),duration = duration,fps = fps)\n\n if self.make_grid:\n samples_all = [make_grid(images, cols=int(len(samples_all))) for images in zip(*samples_all)]\n save_path = os.path.join(self.logdir, f\"step_{step}.gif\")\n save_gif_mp4_folder_type(samples_all, save_path,duration = duration,fps = fps)\n if self.prompt2prompt_edit:\n if len(attention_all) > 0 :\n attention_all = [make_grid(images, cols=1) for images in zip(*attention_all)]\n if 
len(attention_all) > 0:\n save_gif_mp4_folder_type(attention_all, save_path.replace('.gif', 'atten.gif'),duration = duration,fps = fps)\n return samples_all" }, { "identifier": "get_control", "path": "annotator/util.py", "snippet": "def get_control(type):\n if type == 'canny':\n from .canny import CannyDetector\n apply_control = CannyDetector()\n elif type == 'openpose':\n from .openpose import OpenposeDetector\n apply_control = OpenposeDetector()\n elif type == 'depth' or type == 'normal':\n from .midas import MidasDetector\n apply_control = MidasDetector()\n elif type == 'hed':\n from .hed import HEDdetector\n apply_control = HEDdetector()\n elif type == 'scribble':\n apply_control = None\n elif type == 'seg':\n from .uniformer import UniformerDetector\n apply_control = UniformerDetector()\n elif type == 'mlsd':\n from .mlsd import MLSDdetector\n apply_control = MLSDdetector()\n else:\n raise TypeError(type)\n return apply_control" }, { "identifier": "DDIMInterpolationScheduler", "path": "video_diffusion/pipelines/DDIMInterpolationScheduler.py", "snippet": "class DDIMInterpolationScheduler(DDIMScheduler):\n \"\"\"\n Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising\n diffusion probabilistic models (DDPMs) with non-Markovian guidance.\n\n [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`\n function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.\n [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and\n [`~SchedulerMixin.from_pretrained`] functions.\n\n For more details, see the original paper: https://arxiv.org/abs/2010.02502\n\n Args:\n num_train_timesteps (`int`): number of diffusion steps used to train the model.\n beta_start (`float`): the starting `beta` value of inference.\n beta_end (`float`): the final `beta` value.\n beta_schedule (`str`):\n the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from\n `linear`, `scaled_linear`, or `squaredcos_cap_v2`.\n trained_betas (`np.ndarray`, optional):\n option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.\n clip_sample (`bool`, default `True`):\n option to clip predicted sample between -1 and 1 for numerical stability.\n set_alpha_to_one (`bool`, default `True`):\n each diffusion step uses the value of alphas product at that step and at the previous one. For the final\n step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,\n otherwise it uses the value of alpha at step 0.\n steps_offset (`int`, default `0`):\n an offset added to the inference steps. 
You can use a combination of `offset=1` and\n `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in\n stable diffusion.\n prediction_type (`str`, default `epsilon`, optional):\n prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion\n process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4\n https://imagen.research.google/video/paper.pdf)\n \"\"\"\n\n _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()\n _deprecated_kwargs = [\"predict_epsilon\"]\n order = 1\n\n def set_model(self,vae,interpolater):\n self.interpolater = interpolater\n self.vae = vae\n \n \n def decode_latents(self, latents):\n is_video = (latents.dim() == 5)\n b = latents.shape[0]\n latents = 1 / 0.18215 * latents\n \n if is_video:\n latents = rearrange(latents, \"b c f h w -> (b f) c h w\") # torch.Size([70, 4, 64, 64])\n\n latents_split = torch.split(latents, 16, dim=0)\n image = torch.cat([self.vae.decode(l).sample for l in latents_split], dim=0)\n \n # image_full = self.vae.decode(latents).sample\n # RuntimeError: upsample_nearest_nhwc only supports output tensors with less than INT_MAX elements\n # Pytorch upsample alogrithm not work for batch size 32 -> 64 \n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n\n # image = image.cpu().float().numpy()\n # if is_video:\n # image = rearrange(image, \"(b f) c h w -> b f h w c\", b=b)\n # else:\n # image = rearrange(image, \"b c h w -> b h w c\", b=b)\n return image\n def encode_latents(self,images,generator = None):\n if len(images.shape) == 4:\n images = images[None:]\n images = ((images - 0.5) * 2 ) \n latents = self.vae.encode(images).latent_dist.sample(generator)\n latents = latents * 0.18215\n return latents\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n generator=None,\n variance_noise: Optional[torch.FloatTensor] = None,\n return_dict: bool = True,\n ) -> Union[DDIMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n model_output (`torch.FloatTensor`): direct output from learned diffusion model.\n timestep (`int`): current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n current instance of sample being created by diffusion process.\n eta (`float`): weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`): if `True`, compute \"corrected\" `model_output` from the clipped\n predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when\n `self.config.clip_sample` is `True`. If no clipping has happened, \"corrected\" `model_output` would\n coincide with the one provided as input and `use_clipped_model_output` will have not effect.\n generator: random number generator.\n variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we\n can directly provide the noise for the variance itself. This is useful for methods such as\n CycleDiffusion. 
(https://arxiv.org/abs/2210.05559)\n return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class\n\n Returns:\n [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:\n [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When\n returning a tuple, the first element is the sample tensor.\n\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf\n # Ideally, read DDIM paper in-detail understanding\n\n # Notation (<variable name> -> <name in paper>\n # - pred_noise_t -> e_theta(x_t, t)\n # - pred_original_sample -> f_theta(x_t, t) or x_0\n # - std_dev_t -> sigma_t\n # - eta -> η\n # - pred_sample_direction -> \"direction pointing to x_t\"\n # - pred_prev_sample -> \"x_t-1\"\n\n # 1. get previous step value (=t-1)\n prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n\n beta_prod_t = 1 - alpha_prod_t\n\n # 3. compute predicted original sample from predicted noise also called\n # \"predicted x_0\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n if self.config.prediction_type == \"epsilon\":\n pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)\n elif self.config.prediction_type == \"sample\":\n pred_original_sample = model_output\n elif self.config.prediction_type == \"v_prediction\":\n pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output\n # predict V\n model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample\n else:\n raise ValueError(\n f\"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or\"\n \" `v_prediction`\"\n )\n\n # # add a interpolater\n images = self.decode_latents(pred_original_sample)\n\n f , C, H, W = images.shape\n # images = torch.from_numpy(images).cuda()\n ph = ((H - 1) // 32 + 1) * 32\n pw = ((W - 1) // 32 + 1) * 32\n padding = (0, pw - W, 0, ph - H)\n images= F.pad(images,padding).float()\n for i in range(1,f-2):\n img0 = images[i:i+1]\n img1 = images[i+2:i+3] \n inference_img = self.interpolater.inference(img0,img1)\n images[i+1:i+2] = inference_img\n pred_original_sample = self.encode_latents(images.to(self.vae.dtype),generator)\n pred_original_sample = rearrange(pred_original_sample[None], 'b f c h w -> b c f h w') \n\n \n # 4. Clip \"predicted x_0\"\n if self.config.clip_sample:\n pred_original_sample = torch.clamp(pred_original_sample, -1, 1)\n\n # 5. compute variance: \"sigma_t(η)\" -> see formula (16)\n # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)\n variance = self._get_variance(timestep, prev_timestep)\n std_dev_t = eta * variance ** (0.5)\n\n if use_clipped_model_output:\n # the model_output is always re-derived from the clipped x_0 in Glide\n model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)\n\n # 6. compute \"direction pointing to x_t\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output\n\n # 7. 
compute x_t without \"random noise\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction\n\n if eta > 0:\n # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072\n device = model_output.device\n if variance_noise is not None and generator is not None:\n raise ValueError(\n \"Cannot pass both generator and variance_noise. Please make sure that either `generator` or\"\n \" `variance_noise` stays `None`.\"\n )\n\n if variance_noise is None:\n if device.type == \"mps\":\n # randn does not work reproducibly on mps\n variance_noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator)\n variance_noise = variance_noise.to(device)\n else:\n variance_noise = torch.randn(\n model_output.shape, generator=generator, device=device, dtype=model_output.dtype\n )\n variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * variance_noise\n\n prev_sample = prev_sample + variance\n\n if not return_dict:\n return (prev_sample,)\n\n return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)" }, { "identifier": "Model", "path": "RIFEModel/RIFE_HDv3.py", "snippet": "class Model:\n def __init__(self, local_rank=-1):\n self.flownet = IFNet()\n self.device()\n self.optimG = AdamW(self.flownet.parameters(), lr=1e-6, weight_decay=1e-4)\n self.epe = EPE()\n # self.vgg = VGGPerceptualLoss().to(device)\n self.sobel = SOBEL()\n if local_rank != -1:\n self.flownet = DDP(self.flownet, device_ids=[local_rank], output_device=local_rank)\n\n def train(self):\n self.flownet.train()\n\n def eval(self):\n self.flownet.eval()\n\n def device(self):\n self.flownet.to(device)\n\n def load_model(self, path, rank=0):\n def convert(param):\n if rank == -1:\n return {\n k.replace(\"module.\", \"\"): v\n for k, v in param.items()\n if \"module.\" in k\n }\n else:\n return param\n if rank <= 0:\n if torch.cuda.is_available():\n self.flownet.load_state_dict(convert(torch.load('{}/flownet.pkl'.format(path))))\n else:\n self.flownet.load_state_dict(convert(torch.load('{}/flownet.pkl'.format(path), map_location ='cpu')))\n \n def save_model(self, path, rank=0):\n if rank == 0:\n torch.save(self.flownet.state_dict(),'{}/flownet.pkl'.format(path))\n\n def inference(self, img0, img1, scale=1.0):\n imgs = torch.cat((img0, img1), 1)\n scale_list = [4/scale, 2/scale, 1/scale]\n flow, mask, merged = self.flownet(imgs, scale_list)\n return merged[2]\n \n def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):\n for param_group in self.optimG.param_groups:\n param_group['lr'] = learning_rate\n img0 = imgs[:, :3]\n img1 = imgs[:, 3:]\n if training:\n self.train()\n else:\n self.eval()\n scale = [4, 2, 1]\n flow, mask, merged = self.flownet(torch.cat((imgs, gt), 1), scale=scale, training=training)\n loss_l1 = (merged[2] - gt).abs().mean()\n loss_smooth = self.sobel(flow[2], flow[2]*0).mean()\n # loss_vgg = self.vgg(merged[2], gt)\n if training:\n self.optimG.zero_grad()\n loss_G = loss_cons + loss_smooth * 0.1\n loss_G.backward()\n self.optimG.step()\n else:\n flow_teacher = flow[2]\n return merged[2], {\n 'mask': mask,\n 'flow': flow[2][:, :2],\n 'loss_l1': loss_l1,\n 'loss_cons': loss_cons,\n 'loss_smooth': loss_smooth,\n }" } ]
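Both UNetPseudo3DConditionModel.load_2d_state_dict and ControlNetPseudo3DModel.load_2d_state_dict above inflate a 2D diffusers checkpoint into a pseudo-3D model: every non-temporal parameter must come from the 2D weights with an identical shape, while parameters whose names contain `_temporal` keep the 3D model's fresh initialization. A minimal standalone sketch of that merging rule on toy tensors; `inflate_2d_state_dict` is a hypothetical helper name, not the repository's code.

import torch

def inflate_2d_state_dict(state_dict_2d: dict, state_dict_3d: dict) -> dict:
    """Merge 2D weights into a pseudo-3D state dict, leaving temporal layers untouched."""
    for name, tensor in state_dict_2d.items():
        if name not in state_dict_3d:
            raise KeyError(f"2d key {name} does not exist in the 3d model")
        if tensor.shape != state_dict_3d[name].shape:
            raise ValueError(f"shape mismatch for {name}: {tensor.shape} vs {state_dict_3d[name].shape}")
    for name in state_dict_3d:
        # every non-temporal 3D parameter must have a 2D counterpart
        if "_temporal" not in name and name not in state_dict_2d:
            raise KeyError(f"3d key {name} does not exist in the 2d checkpoint")
    merged = dict(state_dict_3d)   # start from the 3D model's own (temporal) weights
    merged.update(state_dict_2d)   # overwrite all spatial weights with the 2D checkpoint
    return merged

# toy usage: the temporal layer keeps its 3D init, the spatial layer is overwritten
sd2d = {"conv_in.weight": torch.ones(4, 4)}
sd3d = {"conv_in.weight": torch.zeros(4, 4), "attn_temporal.weight": torch.zeros(2, 2)}
merged = inflate_2d_state_dict(sd2d, sd3d)
print(merged["conv_in.weight"].mean().item())        # 1.0
print(merged["attn_temporal.weight"].mean().item())  # 0.0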
import os import copy import click import re import numpy as np import torch import torch.utils.data import torch.utils.checkpoint import decord import shutil from glob import glob from typing import Optional,Dict from tqdm.auto import tqdm from omegaconf import OmegaConf from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDIMScheduler, ) from diffusers.utils.import_utils import is_xformers_available from transformers import AutoTokenizer, CLIPTextModel from einops import rearrange from video_diffusion.models.unet_3d_condition import UNetPseudo3DConditionModel from video_diffusion.models.controlnet_3d_condition import ControlNetPseudo3DModel from video_diffusion.data.dataset import ImageSequenceDataset from video_diffusion.common.util import get_time_string, get_function_args from video_diffusion.common.logger import get_logger_config_path from video_diffusion.common.image_util import log_train_samples from video_diffusion.common.instantiate_from_config import instantiate_from_config from video_diffusion.pipelines.p2p_validation_loop_controlnet import P2pSampleLogger from annotator.util import get_control from video_diffusion.pipelines.DDIMInterpolationScheduler import DDIMInterpolationScheduler from RIFEModel.RIFE_HDv3 import Model
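The script imports ImageSequenceDataset, whose per-index clip is determined by the index arithmetic in get_frame_indices (see the context snippet above). A small self-contained sketch of that arithmetic; the standalone function and the example parameter values are illustrative only.

def get_frame_indices(index, start_sample_frame=0, stride=1,
                      sampling_rate=1, n_sample_frame=8):
    # mirrors ImageSequenceDataset.get_frame_indices: clip starts move by
    # `stride` per index, and frames within a clip are `sampling_rate` apart
    frame_start = start_sample_frame + stride * index
    return [frame_start + i * sampling_rate for i in range(n_sample_frame)]

print(get_frame_indices(0, sampling_rate=2, n_sample_frame=4))            # [0, 2, 4, 6]
print(get_frame_indices(1, stride=3, sampling_rate=2, n_sample_frame=4))  # [3, 5, 7, 9]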
20140
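P2pSampleLogger in the context above edits long clips window by window (at most 8 frames at a time), prepending frame 0 as a global anchor (twice for the first window) and, from the second window on, also re-including the last frame of the previous window. A sketch of just that index bookkeeping; `window_indices` is a hypothetical helper and the pipeline call itself is omitted.

def window_indices(clip_length, window=8):
    windows = []
    start, end, patch = 0, min(window, clip_length), 0
    while start < clip_length:
        if patch == 0:
            # first window: frame 0 duplicated twice as the anchor
            idx = [0] + [0] + list(range(start, min(clip_length, end)))
        else:
            # later windows: frame 0 plus the last frame of the previous window
            idx = [0] + list(range(start - 1, min(clip_length, end)))
        windows.append(idx)
        start, end, patch = end, end + window, patch + 1
    return windows

for w in window_indices(clip_length=12, window=8):
    print(w)
# [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]
# [0, 7, 8, 9, 10, 11]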
print("There are {} frames in the video but we take {} frames".format(len(vr), dataset_config.n_sample_frame)) if dataset_config.n_sample_frame <= 50: duration = 100 fps = 10 sample_index = list(range(0,len(vr), 1))[:dataset_config.n_sample_frame] video = vr.get_batch(sample_index) video_name_match = re.search(r"(.*)/(.*).mp4", dataset_config.video_path) video_name = video_name_match.group(2) video_frame_folder = os.path.join('data',video_name) if os.path.exists(video_frame_folder): shutil.rmtree(video_frame_folder) os.makedirs(video_frame_folder,exist_ok=True) for i in range(video.shape[0]): frame = video[i] frame_path = os.path.join(video_frame_folder,f'frame-{i:04}.jpg') frame = Image.fromarray(frame.numpy().astype(np.uint8)) frame.save(frame_path) dataset_config.update({'path': video_frame_folder} ) time_string = get_time_string() if logdir is None: logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '') logdir += f"_{time_string}" accelerator = Accelerator( gradient_accumulation_steps=gradient_accumulation_steps, mixed_precision=mixed_precision, ) if accelerator.is_main_process: os.makedirs(logdir, exist_ok=True) OmegaConf.save(args, os.path.join(logdir, "config.yml")) logger = get_logger_config_path(logdir) if seed is not None: set_seed(seed) # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained( pretrained_model_path, subfolder="tokenizer", use_fast=False, ) # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained( pretrained_model_path, subfolder="text_encoder", ) vae = AutoencoderKL.from_pretrained( pretrained_model_path, subfolder="vae", ) #加载unet报错 unet = UNetPseudo3DConditionModel.from_2d_model( os.path.join(pretrained_model_path, "unet"), model_config=model_config ) controlnet = ControlNetPseudo3DModel.from_2d_model( pretrained_controlnet_model_path, model_config=model_config ) if 'target' not in test_pipeline_config: test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionControlPipeline' scheduler = DDIMScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) pipeline = instantiate_from_config( test_pipeline_config, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, control_type = control_type, editing_type = editing_config.editing_type, dilation_kernel = editing_config.dilation_kernel, disk_store=kwargs.get('disk_store', False) ) pipeline.scheduler.set_timesteps(editing_config['num_inference_steps']) if editing_config.use_interpolater: new_scheduler = DDIMInterpolationScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) interpolater = Model() interpolater.load_model('RIFEModel', -1) new_scheduler.set_model(vae,interpolater) print('using interpolater') pipeline.add_new_scheduler(new_scheduler) pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps']) pipeline.set_progress_bar_config(disable=True) # pipeline.print_pipeline(logger) if is_xformers_available(): try: pipeline.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder.requires_grad_(False) prompt_ids = tokenizer( dataset_config["prompt"], truncation=True, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids
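The cropped code above wires a RIFE frame interpolator into DDIMInterpolationScheduler; both that scheduler's step() and the post-sampling smoother in P2pSampleLogger re-estimate each middle frame from its two neighbours and fold the result back in, after padding the frames to a multiple of 32. A minimal sketch of that loop, where interpolate(img0, img1) stands in for RIFE's Model.inference; this is a simplified illustration under those assumptions, not the repository's exact code.

import torch
import torch.nn.functional as F

def smooth_middle_frames(frames: torch.Tensor, interpolate) -> torch.Tensor:
    # frames: (f, c, h, w) in [0, 1]; RIFE expects spatial sizes divisible by 32
    f, c, h, w = frames.shape
    ph = ((h - 1) // 32 + 1) * 32
    pw = ((w - 1) // 32 + 1) * 32
    frames = F.pad(frames, (0, pw - w, 0, ph - h))
    for i in range(f - 2):
        mid = interpolate(frames[i:i + 1], frames[i + 2:i + 3])  # estimate the frame between i and i+2
        # blend with the original middle frame (P2pSampleLogger averages;
        # DDIMInterpolationScheduler.step replaces the frame outright)
        frames[i + 1:i + 2] = (frames[i + 1:i + 2] + mid) / 2
    return frames[:, :, :h, :w]

# toy usage with a stand-in interpolator that just averages its two inputs
frames = torch.rand(5, 3, 60, 60)
print(smooth_middle_frames(frames, lambda a, b: (a + b) / 2).shape)  # torch.Size([5, 3, 60, 60])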
decord.bridge.set_bridge('torch')
# from video_diffusion.pipelines.p2p_validation_loop_controlnet_ablation import P2pSampleLogger
# logger = get_logger(__name__)


def collate_fn(examples):
    """Concat a batch of sampled image in dataloader """
    batch = {
        "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0),
        "images": torch.stack([example["images"] for example in examples]),
    }
    return batch


def test(
    config: str,
    pretrained_model_path: str,
    control_type: str,
    pretrained_controlnet_model_path: str,
    dataset_config: Dict,
    logdir: str = None,
    editing_config: Optional[Dict] = None,
    test_pipeline_config: Optional[Dict] = None,
    gradient_accumulation_steps: int = 1,
    seed: Optional[int] = None,
    mixed_precision: Optional[str] = "fp16",
    batch_size: int = 1,
    model_config: dict = {},
    verbose: bool = True,
    **kwargs
):
    args = get_function_args()

    vr = decord.VideoReader(dataset_config.video_path)
    fps = vr.get_avg_fps()
    duration = len(vr) / fps
    print("There are {} frames in the video but we take {} frames".format(len(vr), dataset_config.n_sample_frame))
    if dataset_config.n_sample_frame <= 50:
        duration = 100
        fps = 10
    sample_index = list(range(0, len(vr), 1))[:dataset_config.n_sample_frame]
    video = vr.get_batch(sample_index)
    video_name_match = re.search(r"(.*)/(.*).mp4", dataset_config.video_path)
    video_name = video_name_match.group(2)
    video_frame_folder = os.path.join('data', video_name)
    if os.path.exists(video_frame_folder):
        shutil.rmtree(video_frame_folder)
    os.makedirs(video_frame_folder, exist_ok=True)
    for i in range(video.shape[0]):
        frame = video[i]
        frame_path = os.path.join(video_frame_folder, f'frame-{i:04}.jpg')
        frame = Image.fromarray(frame.numpy().astype(np.uint8))
        frame.save(frame_path)
    dataset_config.update({'path': video_frame_folder})

    time_string = get_time_string()
    if logdir is None:
        logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '')
    logdir += f"_{time_string}"

    accelerator = Accelerator(
        gradient_accumulation_steps=gradient_accumulation_steps,
        mixed_precision=mixed_precision,
    )
    if accelerator.is_main_process:
        os.makedirs(logdir, exist_ok=True)
        OmegaConf.save(args, os.path.join(logdir, "config.yml"))
    logger = get_logger_config_path(logdir)
    if seed is not None:
        set_seed(seed)

    # Load the tokenizer
    tokenizer = AutoTokenizer.from_pretrained(
        pretrained_model_path,
        subfolder="tokenizer",
        use_fast=False,
    )

    # Load models and create wrapper for stable diffusion
    text_encoder = CLIPTextModel.from_pretrained(
        pretrained_model_path,
        subfolder="text_encoder",
    )
    vae = AutoencoderKL.from_pretrained(
        pretrained_model_path,
        subfolder="vae",
    )
    # loading the unet raises an error
    unet = UNetPseudo3DConditionModel.from_2d_model(
        os.path.join(pretrained_model_path, "unet"), model_config=model_config
    )
    controlnet = ControlNetPseudo3DModel.from_2d_model(
        pretrained_controlnet_model_path, model_config=model_config
    )

    if 'target' not in test_pipeline_config:
        test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionControlPipeline'

    scheduler = DDIMScheduler.from_pretrained(
        pretrained_model_path,
        subfolder="scheduler",
    )
    pipeline = instantiate_from_config(
        test_pipeline_config,
        vae=vae,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        unet=unet,
        controlnet=controlnet,
        scheduler=scheduler,
        control_type=control_type,
        editing_type=editing_config.editing_type,
        dilation_kernel=editing_config.dilation_kernel,
        disk_store=kwargs.get('disk_store', False)
    )
    pipeline.scheduler.set_timesteps(editing_config['num_inference_steps'])

    if editing_config.use_interpolater:
        new_scheduler = DDIMInterpolationScheduler.from_pretrained(
            pretrained_model_path,
            subfolder="scheduler",
        )
        interpolater = Model()
        interpolater.load_model('RIFEModel', -1)
        new_scheduler.set_model(vae, interpolater)
        print('using interpolater')
        pipeline.add_new_scheduler(new_scheduler)
        pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps'])

    pipeline.set_progress_bar_config(disable=True)
    # pipeline.print_pipeline(logger)

    if is_xformers_available():
        try:
            pipeline.enable_xformers_memory_efficient_attention()
        except Exception as e:
            logger.warning(
                "Could not enable memory efficient attention. Make sure xformers is installed"
                f" correctly and a GPU is available: {e}"
            )

    vae.requires_grad_(False)
    unet.requires_grad_(False)
    text_encoder.requires_grad_(False)

    prompt_ids = tokenizer(
        dataset_config["prompt"],
        truncation=True,
        padding="max_length",
        max_length=tokenizer.model_max_length,
        return_tensors="pt",
    ).input_ids
video_dataset = ImageSequenceDataset(**dataset_config, prompt_ids=prompt_ids)
2
2023-10-09 14:38:28+00:00
24k
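For orientation, the row that ends here reads as one next-line completion example: the retrieved context snippets, the import statement, and the cropped code form the model input, and next_line is the reference continuation. The sketch below is only an illustration of that reading; it assumes a row is available as a Python dict keyed by the field names shown in these rows, and the helper names (build_prompt, first_line_matches) are hypothetical, not part of the dataset.

from typing import Dict, List

def build_prompt(row: Dict) -> str:
    """Assemble a next-line completion prompt from one dump row (illustrative layout)."""
    # Each 'context' entry carries a retrieved cross-file snippet plus its identifier and path.
    context_blocks: List[str] = [
        "# {} ({})\n{}".format(entry["identifier"], entry["path"], entry["snippet"])
        for entry in row["context"]
    ]
    # The model continues 'cropped_code'; 'next_line' is the gold continuation.
    return "\n\n".join(context_blocks + [row["import_statement"], row["cropped_code"]])

def first_line_matches(prediction: str, row: Dict) -> bool:
    """Check whether the first predicted line equals the gold 'next_line'."""
    lines = prediction.strip().splitlines()
    return bool(lines) and lines[0].strip() == row["next_line"].strip()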
LiYunfengLYF/LightFC
lib/train/data/base_functions.py
[ { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\r\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\r\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\r\n train_cls=False, pos_prob=0.5):\r\n def __len__(self):\r\n def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\r\n allow_invisible=False, force_invisible=False):\r\n def __getitem__(self, index):\r\n def getitem(self):\r\n def getitem_cls(self):\r\n def get_center_box(self, H, W, ratio=1 / 8):\r\n def sample_seq_from_dataset(self, dataset, is_video_dataset):\r\n def get_one_search(self):\r\n def get_frame_ids_trident(self, visible):\r\n def get_frame_ids_stark(self, visible, valid):\r\nclass TrackingSampler(torch.utils.data.Dataset):\r\n H, W, _ = template_frames[0].shape\r\n H, W, _ = template_frames[0].shape\r\n H, W, _ = search_frames[0].shape\r" }, { "identifier": "processing", "path": "lib/train/data/processing.py", "snippet": "def stack_tensors(x):\r\n def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None,\r\n joint_transform=None):\r\n def __call__(self, data: TensorDict):\r\n def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\r\n mode='pair', settings=None, *args, **kwargs):\r\n def _get_jittered_box(self, box, mode):\r\n def __call__(self, data: TensorDict):\r\nclass BaseProcessing:\r\nclass STARKProcessing(BaseProcessing):\r" }, { "identifier": "LTRLoader", "path": "lib/train/data/loader.py", "snippet": "class LTRLoader(torch.utils.data.dataloader.DataLoader):\r\n \"\"\"\r\n Data loader. Combines a dataset and a sampler, and provides\r\n single- or multi-process iterators over the dataset.\r\n\r\n Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\r\n select along which dimension the data should be stacked to form a batch.\r\n\r\n Arguments:\r\n dataset (Dataset): dataset from which to load the data.\r\n batch_size (int, optional): how many samples per batch to load\r\n (default: 1).\r\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\r\n at every epoch (default: False).\r\n sampler (Sampler, optional): defines the strategy to draw samples from\r\n the dataset. If specified, ``shuffle`` must be False.\r\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\r\n indices at a time. Mutually exclusive with batch_size, shuffle,\r\n sampler, and drop_last.\r\n num_workers (int, optional): how many subprocesses to use for data\r\n loading. 0 means that the data will be loaded in the main process.\r\n (default: 0)\r\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\r\n stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\r\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\r\n into CUDA pinned memory before returning them.\r\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\r\n if the dataset size is not divisible by the batch size. If ``False`` and\r\n the size of dataset is not divisible by the batch size, then the last batch\r\n will be smaller. (default: False)\r\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\r\n from workers. Should always be non-negative. 
(default: 0)\r\n worker_init_fn (callable, optional): If not None, this will be called on each\r\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\r\n input, after seeding and before data loading. (default: None)\r\n\r\n .. note:: By default, each worker will have its PyTorch seed set to\r\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\r\n by main process using its RNG. However, seeds for other libraries\r\n may be duplicated upon initializing workers (w.g., NumPy), causing\r\n each worker to return identical random numbers. (See\r\n :ref:`dataloader-workers-random-seed` section in FAQ.) You may\r\n use ``torch.initial_seed()`` to access the PyTorch seed for each\r\n worker in :attr:`worker_init_fn`, and use it to set other seeds\r\n before data loading.\r\n\r\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\r\n unpicklable object, e.g., a lambda function.\r\n \"\"\"\r\n\r\n __initialized = False\r\n\r\n def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\r\n num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\r\n timeout=0, worker_init_fn=None):\r\n if collate_fn is None:\r\n if stack_dim == 0:\r\n collate_fn = ltr_collate\r\n elif stack_dim == 1:\r\n collate_fn = ltr_collate_stack1\r\n else:\r\n raise ValueError('Stack dim no supported. Must be 0 or 1.')\r\n\r\n super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\r\n num_workers, collate_fn, pin_memory, drop_last,\r\n timeout, worker_init_fn)\r\n\r\n self.name = name\r\n self.training = training\r\n self.epoch_interval = epoch_interval\r\n self.stack_dim = stack_dim\r" }, { "identifier": "opencv_loader", "path": "lib/train/data/image_loader.py", "snippet": "def opencv_loader(path):\r\n \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\r\n try:\r\n im = cv.imread(path, cv.IMREAD_COLOR)\r\n\r\n # convert to rgb and return\r\n return cv.cvtColor(im, cv.COLOR_BGR2RGB)\r\n except Exception as e:\r\n print('ERROR: Could not read image \"{}\"'.format(path))\r\n print(e)\r\n return None\r" }, { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\r\n \"\"\" LaSOT dataset.\r\n\r\n Publication:\r\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\r\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\r\n CVPR, 2019\r\n https://arxiv.org/pdf/1809.07845.pdf\r\n\r\n Download the dataset from https://cis.temple.edu/lasot/download.html\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the lasot dataset.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\r\n videos with subscripts -1, -3, and -5 from each class will be used for training.\r\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\r\n vid_ids or split option can be used at a time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).lasot_dir if root is None else root\r\n super().__init__('LaSOT', root, image_loader)\r\n\r\n # Keep a list of all classes\r\n self.class_list = [f for f in os.listdir(self.root)]\r\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\r\n\r\n self.sequence_list = self._build_sequence_list(vid_ids, split)\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_per_class = self._build_class_list()\r\n\r\n def _build_sequence_list(self, vid_ids=None, split=None):\r\n if split is not None:\r\n if vid_ids is not None:\r\n raise ValueError('Cannot set both split_name and vid_ids.')\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\r\n sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\r\n elif vid_ids is not None:\r\n sequence_list = [c + '-' + str(v) for c in self.class_list for v in vid_ids]\r\n else:\r\n raise ValueError('Set either split_name or vid_ids.')\r\n\r\n return sequence_list\r\n\r\n def _build_class_list(self):\r\n seq_per_class = {}\r\n for seq_id, seq_name in enumerate(self.sequence_list):\r\n class_name = seq_name.split('-')[0]\r\n if class_name in seq_per_class:\r\n seq_per_class[class_name].append(seq_id)\r\n else:\r\n seq_per_class[class_name] = [seq_id]\r\n\r\n return seq_per_class\r\n\r\n def get_name(self):\r\n return 'lasot'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\r\n low_memory=False).values\r\n return torch.tensor(gt)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # Read full occlusion and out_of_view\r\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\r\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\r\n\r\n with open(occlusion_file, 'r', newline='') as f:\r\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\r\n with open(out_of_view_file, 'r') as f:\r\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\r\n\r\n target_visible = ~occlusion & ~out_of_view\r\n\r\n return target_visible\r\n\r\n def _get_sequence_path(self, seq_id):\r\n seq_name = self.sequence_list[seq_id]\r\n class_name = seq_name.split('-')[0]\r\n vid_id = seq_name.split('-')[1]\r\n\r\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible = self._read_target_visible(seq_path) & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def 
_get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\r\n\r\n def _get_class(self, seq_path):\r\n raw_class = seq_path.split('/')[-2]\r\n return raw_class\r\n\r\n def get_class_name(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_class = self._get_class(seq_path)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n\r\n obj_class = self._get_class(seq_path)\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "Got10k", "path": "lib/train/dataset/got10k.py", "snippet": "class Got10k(BaseVideoDataset):\r\n \"\"\" GOT-10k dataset.\r\n\r\n Publication:\r\n GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\r\n Lianghua Huang, Xin Zhao, and Kaiqi Huang\r\n arXiv:1810.11981, 2018\r\n https://arxiv.org/pdf/1810.11981.pdf\r\n\r\n Download dataset from http://got-10k.aitestunion.com/downloads\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\r\n not NOT the official got-10k validation split. To use the official validation split, provide that as\r\n the root folder instead.\r\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\r\n options can be used at the same time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).got10k_dir if root is None else root\r\n super().__init__('GOT10k', root, image_loader)\r\n\r\n # all folders inside the root\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n # seq_id is the index of the folder inside the got10k root path\r\n if split is not None:\r\n if seq_ids is not None:\r\n raise ValueError('Cannot set both split_name and seq_ids.')\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\r\n elif split == 'val':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\r\n elif split == 'train_full':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\r\n elif split == 'vottrain':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\r\n elif split == 'votval':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\r\n seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\r\n elif seq_ids is None:\r\n seq_ids = list(range(0, len(self.sequence_list)))\r\n\r\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.sequence_meta_info = self._load_meta_info()\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def get_name(self):\r\n return 'got10k'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def _load_meta_info(self):\r\n sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\r\n return sequence_meta_info\r\n\r\n def _read_meta(self, seq_path):\r\n try:\r\n with open(os.path.join(seq_path, 'meta_info.ini')) as f:\r\n meta_info = f.readlines()\r\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\r\n 'motion_class': meta_info[6].split(': ')[-1][:-1],\r\n 'major_class': meta_info[7].split(': ')[-1][:-1],\r\n 'root_class': meta_info[8].split(': ')[-1][:-1],\r\n 'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\r\n except:\r\n object_meta = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return object_meta\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n\r\n for i, s in enumerate(self.sequence_list):\r\n object_class = self.sequence_meta_info[s]['object_class_name']\r\n if object_class in seq_per_class:\r\n seq_per_class[object_class].append(i)\r\n else:\r\n seq_per_class[object_class] = [i]\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _get_sequence_list(self):\r\n with open(os.path.join(self.root, 'list.txt')) as f:\r\n dir_list = list(csv.reader(f))\r\n dir_list = [dir_name[0] for dir_name in dir_list]\r\n return dir_list\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt = 
pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\r\n low_memory=False).values\r\n return torch.tensor(gt)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # Read full occlusion and out_of_view\r\n occlusion_file = os.path.join(seq_path, \"absence.label\")\r\n cover_file = os.path.join(seq_path, \"cover.label\")\r\n\r\n with open(occlusion_file, 'r', newline='') as f:\r\n occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\r\n with open(cover_file, 'r', newline='') as f:\r\n cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\r\n\r\n target_visible = ~occlusion & (cover > 0).byte()\r\n\r\n visible_ratio = cover.float() / 8\r\n return target_visible, visible_ratio\r\n\r\n def _get_sequence_path(self, seq_id):\r\n return os.path.join(self.root, self.sequence_list[seq_id])\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible, visible_ratio = self._read_target_visible(seq_path)\r\n visible = visible & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\r\n\r\n def _get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\r\n\r\n def get_class_name(self, seq_id):\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n return obj_meta['object_class_name']\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n return frame_list, anno_frames, obj_meta\r" }, { "identifier": "TrackingNet", "path": "lib/train/dataset/tracking_net.py", "snippet": "class TrackingNet(BaseVideoDataset):\r\n \"\"\" TrackingNet dataset.\r\n\r\n Publication:\r\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\r\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\r\n ECCV, 2018\r\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\r\n\r\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None, env_num=None):\r\n \"\"\"\r\n args:\r\n root - The path to the TrackingNet folder, containing the training sets.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\r\n sets (0 - 11) will be used.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).trackingnet_dir if root is None else root\r\n super().__init__('TrackingNet', root, image_loader)\r\n\r\n if set_ids is None:\r\n set_ids = [i for i in range(12)]\r\n\r\n self.set_ids = set_ids\r\n\r\n # Keep a list of all videos. Sequence list is a list of tuples (set_id, video_name) containing the set_id and\r\n # video_name for each sequence\r\n self.sequence_list = list_sequences(self.root, self.set_ids)\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\r\n\r\n # we do not have the class_lists for the tracking net\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def _load_class_info(self):\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\r\n\r\n with open(class_map_path, 'r') as f:\r\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\r\n\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_to_class_map, seq_per_class\r\n\r\n def get_name(self):\r\n return 'trackingnet'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\r\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\r\n low_memory=False).values\r\n return torch.tensor(gt)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bbox = self._read_bb_anno(seq_id)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible = valid.clone().byte()\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, seq_id, frame_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\r\n return self.image_loader(frame_path)\r\n\r\n def _get_class(self, seq_id):\r\n seq_name = self.sequence_list[seq_id][1]\r\n return self.seq_to_class_map[seq_name]\r\n\r\n def get_class_name(self, seq_id):\r\n obj_class = self._get_class(seq_id)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n obj_class = self._get_class(seq_id)\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "ImagenetVID", "path": "lib/train/dataset/imagenetvid.py", "snippet": "class 
ImagenetVID(BaseVideoDataset):\r\n \"\"\" Imagenet VID dataset.\r\n\r\n Publication:\r\n ImageNet Large Scale Visual Recognition Challenge\r\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\r\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\r\n IJCV, 2015\r\n https://arxiv.org/pdf/1409.0575.pdf\r\n\r\n Download the dataset from http://image-net.org/\r\n \"\"\"\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1,env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the imagenet vid dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n min_length - Minimum allowed sequence length.\r\n max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\r\n which cover complete image.\r\n \"\"\"\r\n root = env_settings(env_num).imagenet_dir if root is None else root\r\n super().__init__(\"imagenetvid\", root, image_loader)\r\n\r\n cache_file = os.path.join(root, 'cache.json')\r\n if os.path.isfile(cache_file):\r\n # If available, load the pre-processed cache file containing meta-info for each sequence\r\n with open(cache_file, 'r') as f:\r\n sequence_list_dict = json.load(f)\r\n\r\n self.sequence_list = sequence_list_dict\r\n else:\r\n # Else process the imagenet annotations and generate the cache file\r\n self.sequence_list = self._process_anno(root)\r\n\r\n with open(cache_file, 'w') as f:\r\n json.dump(self.sequence_list, f)\r\n\r\n # Filter the sequences based on min_length and max_target_area in the first frame\r\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\r\n get_target_to_image_ratio(x) < max_target_area]\r\n\r\n def get_name(self):\r\n return 'imagenetvid'\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\r\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\r\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\r\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, sequence, frame_id):\r\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\r\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\r\n frame_number = frame_id + sequence['start_frame']\r\n frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\r\n '{:06d}.JPEG'.format(frame_number))\r\n return self.image_loader(frame_path)\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n sequence = self.sequence_list[seq_id]\r\n\r\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n # Create anno dict\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n # added the class info to the meta info\r\n object_meta = OrderedDict({'object_class': sequence['class_name'],\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r\n\r\n def _process_anno(self, root):\r\n # Builds individual tracklets\r\n base_vid_anno_path = os.path.join(root, 'Annotations', 
'VID', 'train')\r\n\r\n all_sequences = []\r\n for set in sorted(os.listdir(base_vid_anno_path)):\r\n set_id = int(set.split('_')[-1])\r\n for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\r\n\r\n vid_id = int(vid.split('_')[-1])\r\n anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\r\n\r\n frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\r\n image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)]\r\n\r\n objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\r\n for f in anno_files]\r\n\r\n tracklets = {}\r\n\r\n # Find all tracklets along with start frame\r\n for f_id, all_targets in enumerate(objects):\r\n for target in all_targets:\r\n tracklet_id = target.find('trackid').text\r\n if tracklet_id not in tracklets:\r\n tracklets[tracklet_id] = f_id\r\n\r\n for tracklet_id, tracklet_start in tracklets.items():\r\n tracklet_anno = []\r\n target_visible = []\r\n class_name_id = None\r\n\r\n for f_id in range(tracklet_start, len(objects)):\r\n found = False\r\n for target in objects[f_id]:\r\n if target.find('trackid').text == tracklet_id:\r\n if not class_name_id:\r\n class_name_id = target.find('name').text\r\n x1 = int(target.find('bndbox/xmin').text)\r\n y1 = int(target.find('bndbox/ymin').text)\r\n x2 = int(target.find('bndbox/xmax').text)\r\n y2 = int(target.find('bndbox/ymax').text)\r\n\r\n tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\r\n target_visible.append(target.find('occluded').text == '0')\r\n\r\n found = True\r\n break\r\n if not found:\r\n break\r\n\r\n new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\r\n 'start_frame': tracklet_start, 'anno': tracklet_anno,\r\n 'target_visible': target_visible, 'image_size': image_size}\r\n all_sequences.append(new_sequence)\r\n\r\n return all_sequences\r" }, { "identifier": "MSCOCOSeq", "path": "lib/train/dataset/coco_seq.py", "snippet": "class MSCOCOSeq(BaseVideoDataset):\r\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\r\n\r\n Publication:\r\n Microsoft COCO: Common Objects in Context.\r\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\r\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\r\n ECCV, 2014\r\n https://arxiv.org/pdf/1405.0312.pdf\r\n\r\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\r\n organized as follows.\r\n - coco_root\r\n - annotations\r\n - instances_train2014.json\r\n - instances_train2017.json\r\n - images\r\n - train2014\r\n - train2017\r\n\r\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\",env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the coco dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\r\n images will be used\r\n split - 'train' or 'val'.\r\n version - version of coco dataset (2014 or 2017)\r\n \"\"\"\r\n root = env_settings(env_num).coco_dir if root is None else root\r\n super().__init__('COCO', root, image_loader)\r\n\r\n self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\r\n self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\r\n\r\n # Load the COCO set.\r\n self.coco_set = COCO(self.anno_path)\r\n\r\n self.cats = self.coco_set.cats\r\n\r\n self.class_list = self.get_class_list()\r\n\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n def _get_sequence_list(self):\r\n ann_list = list(self.coco_set.anns.keys())\r\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\r\n\r\n return seq_list\r\n\r\n def is_video_sequence(self):\r\n return False\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_name(self):\r\n return 'coco'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_class_list(self):\r\n class_list = []\r\n for cat_id in self.cats.keys():\r\n class_list.append(self.cats[cat_id]['name'])\r\n return class_list\r\n\r\n def has_segmentation_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def get_sequence_info(self, seq_id):\r\n anno = self._get_anno(seq_id)\r\n\r\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\r\n\r\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\r\n\r\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\r\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\r\n\r\n visible = valid.clone().byte()\r\n\r\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\r\n\r\n def _get_anno(self, seq_id):\r\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\r\n\r\n return anno\r\n\r\n def _get_frames(self, seq_id):\r\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\r\n img = self.image_loader(os.path.join(self.img_pth, path))\r\n return img\r\n\r\n def get_meta_info(self, seq_id):\r\n try:\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\r\n 'motion_class': None,\r\n 'major_class': cat_dict_current['supercategory'],\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n except:\r\n object_meta = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return object_meta\r\n\r\n\r\n def get_class_name(self, seq_id):\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n return cat_dict_current['name']\r\n\r\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\r\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\r\n # list containing these replicated images.\r\n frame = self._get_frames(seq_id)\r\n\r\n frame_list = [frame.copy() for _ in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\r\n\r\n object_meta = self.get_meta_info(seq_id)\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "Got10k_lmdb", "path": "lib/train/dataset/got10k_lmdb.py", "snippet": "class Got10k_lmdb(BaseVideoDataset):\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\r\n not NOT the official got-10k validation split. To use the official validation split, provide that as\r\n the root folder instead.\r\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\r\n options can be used at the same time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n use_lmdb - whether the dataset is stored in lmdb format\r\n \"\"\"\r\n root = env_settings(env_num).got10k_lmdb_dir if root is None else root\r\n super().__init__('GOT10k_lmdb', root, image_loader)\r\n\r\n # all folders inside the root\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n # seq_id is the index of the folder inside the got10k root path\r\n if split is not None:\r\n if seq_ids is not None:\r\n raise ValueError('Cannot set both split_name and seq_ids.')\r\n train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\r\n elif split == 'val':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\r\n elif split == 'train_full':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\r\n elif split == 'vottrain':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\r\n elif split == 'votval':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\r\n elif seq_ids is None:\r\n seq_ids = list(range(0, len(self.sequence_list)))\r\n\r\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.sequence_meta_info = self._load_meta_info()\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def get_name(self):\r\n return 'got10k_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def _load_meta_info(self):\r\n def _read_meta(meta_info):\r\n\r\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\r\n 'motion_class': meta_info[6].split(': ')[-1],\r\n 'major_class': meta_info[7].split(': ')[-1],\r\n 'root_class': meta_info[8].split(': ')[-1],\r\n 'motion_adverb': meta_info[9].split(': ')[-1]})\r\n\r\n return object_meta\r\n\r\n sequence_meta_info = {}\r\n for s in self.sequence_list:\r\n try:\r\n meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" % s)\r\n sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\r\n except:\r\n sequence_meta_info[s] = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return sequence_meta_info\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n\r\n for i, s in enumerate(self.sequence_list):\r\n object_class = self.sequence_meta_info[s]['object_class_name']\r\n if object_class in seq_per_class:\r\n seq_per_class[object_class].append(i)\r\n else:\r\n seq_per_class[object_class] = [i]\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _get_sequence_list(self):\r\n dir_str = decode_str(self.root, 'train/list.txt')\r\n dir_list = dir_str.split('\\n')\r\n return dir_list\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line in 
got10k is empty\r\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\r\n gt_arr = np.array(gt_list).astype(np.float32)\r\n\r\n return torch.tensor(gt_arr)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # full occlusion and out_of_view files\r\n occlusion_file = os.path.join(seq_path, \"absence.label\")\r\n cover_file = os.path.join(seq_path, \"cover.label\")\r\n # Read these files\r\n occ_list = list(\r\n map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1])) # the last line in got10k is empty\r\n occlusion = torch.ByteTensor(occ_list)\r\n cover_list = list(\r\n map(int, decode_str(self.root, cover_file).split('\\n')[:-1])) # the last line in got10k is empty\r\n cover = torch.ByteTensor(cover_list)\r\n\r\n target_visible = ~occlusion & (cover > 0).byte()\r\n\r\n visible_ratio = cover.float() / 8\r\n return target_visible, visible_ratio\r\n\r\n def _get_sequence_path(self, seq_id):\r\n return os.path.join(\"train\", self.sequence_list[seq_id])\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible, visible_ratio = self._read_target_visible(seq_path)\r\n visible = visible & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\r\n\r\n def _get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\r\n\r\n def get_class_name(self, seq_id):\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n return obj_meta['object_class_name']\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n return frame_list, anno_frames, obj_meta\r" }, { "identifier": "Lasot_lmdb", "path": "lib/train/dataset/lasot_lmdb.py", "snippet": "class Lasot_lmdb(BaseVideoDataset):\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the lasot dataset.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\r\n videos with subscripts -1, -3, and -5 from each class will be used for training.\r\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\r\n vid_ids or split option can be used at a time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).lasot_lmdb_dir if root is None else root\r\n super().__init__('LaSOT_lmdb', root, image_loader)\r\n\r\n self.sequence_list = self._build_sequence_list(vid_ids, split)\r\n class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\r\n self.class_list = []\r\n for ele in class_list:\r\n if ele not in self.class_list:\r\n self.class_list.append(ele)\r\n # Keep a list of all classes\r\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_per_class = self._build_class_list()\r\n\r\n def _build_sequence_list(self, vid_ids=None, split=None):\r\n if split is not None:\r\n if vid_ids is not None:\r\n raise ValueError('Cannot set both split_name and vid_ids.')\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\r\n elif vid_ids is not None:\r\n sequence_list = [c + '-' + str(v) for c in self.class_list for v in vid_ids]\r\n else:\r\n raise ValueError('Set either split_name or vid_ids.')\r\n\r\n return sequence_list\r\n\r\n def _build_class_list(self):\r\n seq_per_class = {}\r\n for seq_id, seq_name in enumerate(self.sequence_list):\r\n class_name = seq_name.split('-')[0]\r\n if class_name in seq_per_class:\r\n seq_per_class[class_name].append(seq_id)\r\n else:\r\n seq_per_class[class_name] = [seq_id]\r\n\r\n return seq_per_class\r\n\r\n def get_name(self):\r\n return 'lasot_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line is empty\r\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\r\n gt_arr = np.array(gt_list).astype(np.float32)\r\n return torch.tensor(gt_arr)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # Read full occlusion and out_of_view\r\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\r\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\r\n\r\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\r\n occlusion = torch.ByteTensor(occ_list)\r\n out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\r\n out_of_view = torch.ByteTensor(out_view_list)\r\n\r\n target_visible = ~occlusion & ~out_of_view\r\n\r\n return target_visible\r\n\r\n def _get_sequence_path(self, seq_id):\r\n seq_name = self.sequence_list[seq_id]\r\n class_name = seq_name.split('-')[0]\r\n vid_id = seq_name.split('-')[1]\r\n\r\n return os.path.join(class_name, class_name + '-' + vid_id)\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n 
visible = self._read_target_visible(seq_path) & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\r\n\r\n def _get_class(self, seq_path):\r\n raw_class = seq_path.split('/')[-2]\r\n return raw_class\r\n\r\n def get_class_name(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_class = self._get_class(seq_path)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n\r\n obj_class = self._get_class(seq_path)\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "ImagenetVID_lmdb", "path": "lib/train/dataset/imagenetvid_lmdb.py", "snippet": "class ImagenetVID_lmdb(BaseVideoDataset):\r\n \"\"\" Imagenet VID dataset.\r\n\r\n Publication:\r\n ImageNet Large Scale Visual Recognition Challenge\r\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\r\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\r\n IJCV, 2015\r\n https://arxiv.org/pdf/1409.0575.pdf\r\n\r\n Download the dataset from http://image-net.org/\r\n \"\"\"\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1,env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the imagenet vid dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n min_length - Minimum allowed sequence length.\r\n max_target_area - max allowed ratio between target area and image area. 
Can be used to filter out targets\r\n which cover complete image.\r\n \"\"\"\r\n root = env_settings(env_num).imagenet_dir if root is None else root\r\n super().__init__(\"imagenetvid_lmdb\", root, image_loader)\r\n\r\n sequence_list_dict = decode_json(root, \"cache.json\")\r\n self.sequence_list = sequence_list_dict\r\n\r\n # Filter the sequences based on min_length and max_target_area in the first frame\r\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\r\n get_target_to_image_ratio(x) < max_target_area]\r\n\r\n def get_name(self):\r\n return 'imagenetvid_lmdb'\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\r\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\r\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\r\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, sequence, frame_id):\r\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\r\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\r\n frame_number = frame_id + sequence['start_frame']\r\n frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\r\n '{:06d}.JPEG'.format(frame_number))\r\n return decode_img(self.root, frame_path)\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n sequence = self.sequence_list[seq_id]\r\n\r\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n # Create anno dict\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n # added the class info to the meta info\r\n object_meta = OrderedDict({'object_class': sequence['class_name'],\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "MSCOCOSeq_lmdb", "path": "lib/train/dataset/coco_seq_lmdb.py", "snippet": "class MSCOCOSeq_lmdb(BaseVideoDataset):\r\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\r\n\r\n Publication:\r\n Microsoft COCO: Common Objects in Context.\r\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\r\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\r\n ECCV, 2014\r\n https://arxiv.org/pdf/1405.0312.pdf\r\n\r\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\r\n organized as follows.\r\n - coco_root\r\n - annotations\r\n - instances_train2014.json\r\n - instances_train2017.json\r\n - images\r\n - train2014\r\n - train2017\r\n\r\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\",\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the coco dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\r\n images will be used\r\n split - 'train' or 'val'.\r\n version - version of coco dataset (2014 or 2017)\r\n \"\"\"\r\n root = env_settings(env_num).coco_dir if root is None else root\r\n super().__init__('COCO_lmdb', root, image_loader)\r\n self.root = root\r\n self.img_pth = 'images/{}{}/'.format(split, version)\r\n self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\r\n\r\n # Load the COCO set.\r\n print('loading annotations into memory...')\r\n tic = time.time()\r\n coco_json = decode_json(root, self.anno_path)\r\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\r\n\r\n self.coco_set = COCO(coco_json)\r\n\r\n self.cats = self.coco_set.cats\r\n\r\n self.class_list = self.get_class_list()\r\n\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n def _get_sequence_list(self):\r\n ann_list = list(self.coco_set.anns.keys())\r\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\r\n\r\n return seq_list\r\n\r\n def is_video_sequence(self):\r\n return False\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_name(self):\r\n return 'coco_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_class_list(self):\r\n class_list = []\r\n for cat_id in self.cats.keys():\r\n class_list.append(self.cats[cat_id]['name'])\r\n return class_list\r\n\r\n def has_segmentation_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def get_sequence_info(self, seq_id):\r\n anno = self._get_anno(seq_id)\r\n\r\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\r\n\r\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\r\n\r\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\r\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\r\n\r\n visible = valid.clone().byte()\r\n\r\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\r\n\r\n def _get_anno(self, seq_id):\r\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\r\n\r\n return anno\r\n\r\n def _get_frames(self, seq_id):\r\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\r\n # img = self.image_loader(os.path.join(self.img_pth, path))\r\n img = decode_img(self.root, os.path.join(self.img_pth, path))\r\n return img\r\n\r\n def get_meta_info(self, seq_id):\r\n try:\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\r\n 'motion_class': None,\r\n 'major_class': cat_dict_current['supercategory'],\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n except:\r\n object_meta = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return object_meta\r\n\r\n def get_class_name(self, seq_id):\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n return cat_dict_current['name']\r\n\r\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\r\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\r\n # list containing these replicated images.\r\n frame = self._get_frames(seq_id)\r\n\r\n frame_list = [frame.copy() for _ in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\r\n\r\n object_meta = self.get_meta_info(seq_id)\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "TrackingNet_lmdb", "path": "lib/train/dataset/tracking_net_lmdb.py", "snippet": "class TrackingNet_lmdb(BaseVideoDataset):\r\n \"\"\" TrackingNet dataset.\r\n\r\n Publication:\r\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\r\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\r\n ECCV, 2018\r\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\r\n\r\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\r\n \"\"\"\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None,env_num=None):\r\n \"\"\"\r\n args:\r\n root - The path to the TrackingNet folder, containing the training sets.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\r\n sets (0 - 11) will be used.\r\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).trackingnet_lmdb_dir if root is None else root\r\n super().__init__('TrackingNet_lmdb', root, image_loader)\r\n\r\n if set_ids is None:\r\n set_ids = [i for i in range(12)]\r\n\r\n self.set_ids = set_ids\r\n\r\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\r\n # video_name for each sequence\r\n self.sequence_list = list_sequences(self.root)\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\r\n\r\n # we do not have the class_lists for the tracking net\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def _load_class_info(self):\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\r\n\r\n with open(class_map_path, 'r') as f:\r\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\r\n\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_to_class_map, seq_per_class\r\n\r\n def get_name(self):\r\n return 'trackingnet_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\r\n os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\r\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\r\n gt_arr = np.array(gt_list).astype(np.float32)\r\n return torch.tensor(gt_arr)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bbox = self._read_bb_anno(seq_id)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible = valid.clone().byte()\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, seq_id, frame_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\r\n os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\r\n\r\n def _get_class(self, seq_id):\r\n seq_name = self.sequence_list[seq_id][1]\r\n return self.seq_to_class_map[seq_name]\r\n\r\n def get_class_name(self, seq_id):\r\n obj_class = self._get_class(seq_id)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n obj_class = self._get_class(seq_id)\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "Adan", "path": "lib/train/optimizer/anan.py", "snippet": "class Adan(Optimizer):\r\n \"\"\"\r\n Implements a pytorch variant of Adan\r\n Adan was proposed in\r\n Adan: Adaptive Nesterov Momentum Algorithm for\r\n Faster Optimizing Deep Models[J].arXiv preprint arXiv:2208.06677, 2022.\r\n https://arxiv.org/abs/2208.06677\r\n Arguments:\r\n params (iterable): iterable of 
parameters to optimize or\r\n dicts defining parameter groups.\r\n lr (float, optional): learning rate. (default: 1e-3)\r\n betas (Tuple[float, float, flot], optional): coefficients used for\r\n first- and second-order moments. (default: (0.98, 0.92, 0.99))\r\n eps (float, optional): term added to the denominator to improve\r\n numerical stability. (default: 1e-8)\r\n weight_decay (float, optional): decoupled weight decay\r\n (L2 penalty) (default: 0)\r\n max_grad_norm (float, optional): value used to clip\r\n global grad norm (default: 0.0 no clip)\r\n no_prox (bool): how to perform the decoupled weight decay\r\n (default: False)\r\n foreach (bool): if True would use torch._foreach implementation.\r\n It's faster but uses slightly more memory. (default: True)\r\n fused (bool, optional): whether fused implementation is used.\r\n (default: False)\r\n\r\n VIT:\r\n 150\r\n lr 0.015\r\n betas (0.98, 0.92, 0.99)\r\n eps 1.0e-08\r\n weight_decay 0.02\r\n max_grad_norm 5.0\r\n no_prox\r\n foreach\r\n fused\r\n 300\r\n lr 0.015\r\n betas (0.98, 0.92, 0.99)\r\n eps 1.0e-08\r\n weight_decay 0.02\r\n max_grad_norm 5.0\r\n no_prox\r\n foreach\r\n fused\r\n \"\"\"\r\n def __init__(self,\r\n params,\r\n lr=1e-3,\r\n betas=(0.98, 0.92, 0.99),\r\n eps=1e-8,\r\n weight_decay=0.0,\r\n max_grad_norm=0.0,\r\n no_prox=False,\r\n foreach: bool = True,\r\n fused: bool = False):\r\n if not 0.0 <= max_grad_norm:\r\n raise ValueError('Invalid Max grad norm: {}'.format(max_grad_norm))\r\n if not 0.0 <= lr:\r\n raise ValueError('Invalid learning rate: {}'.format(lr))\r\n if not 0.0 <= eps:\r\n raise ValueError('Invalid epsilon value: {}'.format(eps))\r\n if not 0.0 <= betas[0] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 0: {}'.format(\r\n betas[0]))\r\n if not 0.0 <= betas[1] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 1: {}'.format(\r\n betas[1]))\r\n if not 0.0 <= betas[2] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 2: {}'.format(\r\n betas[2]))\r\n defaults = dict(lr=lr,\r\n betas=betas,\r\n eps=eps,\r\n weight_decay=weight_decay,\r\n max_grad_norm=max_grad_norm,\r\n no_prox=no_prox,\r\n foreach=foreach,\r\n fused=fused)\r\n super().__init__(params, defaults)\r\n\r\n def __setstate__(self, state):\r\n super(Adan, self).__setstate__(state)\r\n for group in self.param_groups:\r\n group.setdefault('no_prox', False)\r\n\r\n @torch.no_grad()\r\n def restart_opt(self):\r\n for group in self.param_groups:\r\n group['step'] = 0\r\n for p in group['params']:\r\n if p.requires_grad:\r\n state = self.state[p]\r\n # State initialization\r\n\r\n # Exponential moving average of gradient values\r\n state['exp_avg'] = torch.zeros_like(p)\r\n # Exponential moving average of squared gradient values\r\n state['exp_avg_sq'] = torch.zeros_like(p)\r\n # Exponential moving average of gradient difference\r\n state['exp_avg_diff'] = torch.zeros_like(p)\r\n\r\n @torch.no_grad()\r\n def step(self, closure=None):\r\n \"\"\"Performs a single optimization step.\"\"\"\r\n\r\n loss = None\r\n if closure is not None:\r\n with torch.enable_grad():\r\n loss = closure()\r\n\r\n if self.defaults['max_grad_norm'] > 0:\r\n device = self.param_groups[0]['params'][0].device\r\n global_grad_norm = torch.zeros(1, device=device)\r\n\r\n max_grad_norm = torch.tensor(self.defaults['max_grad_norm'],\r\n device=device)\r\n for group in self.param_groups:\r\n\r\n for p in group['params']:\r\n if p.grad is not None:\r\n grad = p.grad\r\n global_grad_norm.add_(grad.pow(2).sum())\r\n\r\n global_grad_norm = 
torch.sqrt(global_grad_norm)\r\n\r\n clip_global_grad_norm = torch.clamp(\r\n max_grad_norm / (global_grad_norm + group['eps']),\r\n max=1.0).item()\r\n else:\r\n clip_global_grad_norm = 1.0\r\n\r\n for group in self.param_groups:\r\n params_with_grad = []\r\n grads = []\r\n exp_avgs = []\r\n exp_avg_sqs = []\r\n exp_avg_diffs = []\r\n neg_pre_grads = []\r\n\r\n beta1, beta2, beta3 = group['betas']\r\n # assume same step across group now to simplify things\r\n # per parameter step can be easily support\r\n # by making it tensor, or pass list into kernel\r\n if 'step' in group:\r\n group['step'] += 1\r\n else:\r\n group['step'] = 1\r\n\r\n bias_correction1 = 1.0 - beta1**group['step']\r\n bias_correction2 = 1.0 - beta2**group['step']\r\n bias_correction3 = 1.0 - beta3**group['step']\r\n\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n params_with_grad.append(p)\r\n grads.append(p.grad)\r\n\r\n state = self.state[p]\r\n if len(state) == 0:\r\n state['exp_avg'] = torch.zeros_like(p)\r\n state['exp_avg_sq'] = torch.zeros_like(p)\r\n state['exp_avg_diff'] = torch.zeros_like(p)\r\n\r\n if 'neg_pre_grad' not in state or group['step'] == 1:\r\n state['neg_pre_grad'] = p.grad.clone().mul_(\r\n -clip_global_grad_norm)\r\n\r\n exp_avgs.append(state['exp_avg'])\r\n exp_avg_sqs.append(state['exp_avg_sq'])\r\n exp_avg_diffs.append(state['exp_avg_diff'])\r\n neg_pre_grads.append(state['neg_pre_grad'])\r\n\r\n kwargs = dict(\r\n params=params_with_grad,\r\n grads=grads,\r\n exp_avgs=exp_avgs,\r\n exp_avg_sqs=exp_avg_sqs,\r\n exp_avg_diffs=exp_avg_diffs,\r\n neg_pre_grads=neg_pre_grads,\r\n beta1=beta1,\r\n beta2=beta2,\r\n beta3=beta3,\r\n bias_correction1=bias_correction1,\r\n bias_correction2=bias_correction2,\r\n bias_correction3_sqrt=math.sqrt(bias_correction3),\r\n lr=group['lr'],\r\n weight_decay=group['weight_decay'],\r\n eps=group['eps'],\r\n no_prox=group['no_prox'],\r\n clip_global_grad_norm=clip_global_grad_norm,\r\n )\r\n\r\n if group['foreach']:\r\n if group['fused']:\r\n if torch.cuda.is_available():\r\n _fused_adan_multi_tensor(**kwargs)\r\n else:\r\n raise ValueError('Fused Adan does not support CPU')\r\n else:\r\n _multi_tensor_adan(**kwargs)\r\n elif group['fused']:\r\n if torch.cuda.is_available():\r\n _fused_adan_single_tensor(**kwargs)\r\n else:\r\n raise ValueError('Fused Adan does not support CPU')\r\n else:\r\n _single_tensor_adan(**kwargs)\r\n\r\n return loss\r" }, { "identifier": "Lion", "path": "lib/train/optimizer/lion.py", "snippet": "class Lion(Optimizer):\r\n r\"\"\"Implements Lion algorithm.\"\"\"\r\n\r\n def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0):\r\n \"\"\"Initialize the hyperparameters.\r\n\r\n Args:\r\n params (iterable): iterable of parameters to optimize or dicts defining\r\n parameter groups\r\n lr (float, optional): learning rate (default: 1e-4)\r\n betas (Tuple[float, float], optional): coefficients used for computing\r\n running averages of gradient and its square (default: (0.9, 0.99))\r\n weight_decay (float, optional): weight decay coefficient (default: 0)\r\n \"\"\"\r\n\r\n if not 0.0 <= lr:\r\n raise ValueError('Invalid learning rate: {}'.format(lr))\r\n if not 0.0 <= betas[0] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))\r\n if not 0.0 <= betas[1] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))\r\n defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay)\r\n super().__init__(params, defaults)\r\n\r\n @torch.no_grad()\r\n def 
step(self, closure=None):\r\n \"\"\"Performs a single optimization step.\r\n\r\n Args:\r\n closure (callable, optional): A closure that reevaluates the model\r\n and returns the loss.\r\n\r\n Returns:\r\n the loss.\r\n \"\"\"\r\n loss = None\r\n if closure is not None:\r\n with torch.enable_grad():\r\n loss = closure()\r\n\r\n for group in self.param_groups:\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n\r\n # Perform stepweight decay\r\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\r\n\r\n grad = p.grad\r\n state = self.state[p]\r\n # State initialization\r\n if len(state) == 0:\r\n # Exponential moving average of gradient values\r\n state['exp_avg'] = torch.zeros_like(p)\r\n\r\n exp_avg = state['exp_avg']\r\n beta1, beta2 = group['betas']\r\n\r\n # Weight update\r\n update = exp_avg * beta1 + grad * (1 - beta1)\r\n p.add_(torch.sign(update), alpha=-group['lr'])\r\n # Decay the momentum running average coefficient\r\n exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)\r\n\r\n return loss" }, { "identifier": "is_main_process", "path": "lib/utils/misc.py", "snippet": "def is_main_process():\r\n return get_rank() == 0\r" } ]
import torch
import lib.train.data.transforms as tfm
from torch.utils.data.distributed import DistributedSampler
from lib.train.data import sampler, opencv_loader, processing, LTRLoader
from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet
from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb
from lib.train.optimizer.anan import Adan
from lib.train.optimizer.lion import Lion
from lib.utils.misc import is_main_process
21027
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append( Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append( Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader, env_num=settings.env_num)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb")
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append( Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append( Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader, env_num=settings.env_num)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb")
datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader,
11
2023-10-08 11:44:32+00:00
24k
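The entry above is complete at this point. As a minimal illustration (not part of the dataset itself), the sketch below shows one way such a row could be scored: comparing a model's predicted continuation against the recorded next line, and checking whether the retrieved context snippet matches the gold index. The helper names are hypothetical and assume the column names shown in this dump; only the field values (the ImagenetVID_lmdb continuation and index 11) are taken from the entry above.

import re

def normalize(line: str) -> str:
    # Collapse whitespace so formatting-only differences are not counted as mismatches.
    return re.sub(r"\s+", " ", line).strip()

def score_row(row: dict, predicted_line: str, retrieved_index: int) -> dict:
    # Hypothetical helper: exact match on the next line plus a retrieval check.
    return {
        "exact_match": normalize(predicted_line) == normalize(row["next_line"]),
        "retrieval_hit": retrieved_index == row["gold_snippet_index"],
    }

# Values abbreviated from the row above.
row = {
    "next_line": "datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader,",
    "gold_snippet_index": 11,
}
print(score_row(row, row["next_line"], 11))  # {'exact_match': True, 'retrieval_hit': True}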
LiyaoTang/ERDA
main.py
[ { "identifier": "load_config", "path": "config/utils.py", "snippet": "def load_config(cfg_path=None, dataset_name=None, cfg_name=None, cfg_group=None, reload=True):\n # cfg from path\n if cfg_path is not None:\n update = None\n if os.path.isfile(cfg_path):\n # update on the default cfg\n from config.base import Base, Config\n update = Base(cfg_path)\n cfg_path = [update.dataset.lower(), 'default']\n else:\n # directly specified cfg\n cfg_path = cfg_path.replace('/', '.').split('.')\n cfg_path = cfg_path if cfg_path[0] == 'config' else ['config'] + cfg_path\n cfg_module = cfg_path[1]\n cfg_class = '.'.join(cfg_path[2:])\n mod = _import_module(cfg_module)\n if hasattr(mod, cfg_class):\n cfg = getattr(mod, cfg_class)\n else:\n cfg = load_config(dataset_name=cfg_path[1], cfg_name=cfg_class, reload=reload)\n\n if update is not None:\n cfg = Config(cfg) # avoid overriding\n cfg.update(update, exclude=[]) # full override with no exclude\n return cfg\n\n # setup dict\n cfg_name_dict = load_config.cfg_name_dict # dataset_name -> {cfg.name -> cfg.idx_name}\n cfg_module_dict = load_config.cfg_module_dict # dataset_name -> cfg_module\n\n if dataset_name is not None and dataset_name not in cfg_module_dict or reload:\n mod = _import_module(dataset_name)\n cfg_module_dict[dataset_name] = mod\n cfg_name_dict[dataset_name] = {}\n for i in dir(mod):\n if not is_config(i, mod=mod): # use the 'base' class imported in 'mod'\n continue\n cfg = getattr(mod, i)\n if cfg.name:\n cfg_name_dict[dataset_name][cfg.name] = cfg.idx_name\n\n # module/cfg from dataset/cfg name\n mod = cfg_module_dict[dataset_name]\n if cfg_name is not None:\n if cfg_name not in cfg_name_dict[dataset_name]:\n raise KeyError(f'no cfg_name = {cfg_name} in module {dataset_name}')\n idx_name = cfg_name_dict[dataset_name][cfg_name]\n return getattr(mod, idx_name)\n elif cfg_group is not None:\n if not hasattr(mod, cfg_group):\n raise KeyError(f'no cfg_group = {cfg_group} in module {dataset_name}')\n cfg_g = getattr(mod, cfg_group)\n if isinstance(cfg_g, type(mod.Base)) and cfg_g._store_dict:\n cfg_g = cfg_g._store_dict\n if not isinstance(cfg_g, (tuple, list, dict, set)):\n raise ValueError(f'cfg_group = {cfg_group} appears to be {cfg_g}, not of type (tuple, list, dict, set)')\n return cfg_g\n return mod" }, { "identifier": "log_config", "path": "config/utils.py", "snippet": "def log_config(config, title='', f_out=None, prefix='', base=None):\n if f_out is None:\n f_out = sys.stdout\n if base is None:\n root = os.path.join(os.getcwd(), os.path.dirname(__file__), '../')\n sys.path += [] if root in sys.path or os.path.realpath(root) in sys.path else [root]\n from config.base import Base as base\n\n print(f'\\n{prefix}<<< ======= {config._cls} ======= {title if title else config.name}', file=f_out)\n max_len = max([len(k) for k in dir(config) if not k.startswith('_')] + [0])\n for k in config.keys(): # dir would sort\n # if k.startswith('_') or _is_method(getattr(config, k)):\n # continue\n cur_attr = getattr(config, k)\n if isinstance(cur_attr, list) and len(str(cur_attr)) > 200: # overlong list\n cur_attr = '[' + f'\\n{prefix}\\t\\t'.join([''] + [str(s) for s in cur_attr]) + f'\\n{prefix}\\t]'\n\n print('\\t%s%s\\t= %s' % (prefix + k, ' ' * (max_len-len(k)), str(cur_attr)), file=f_out)\n if is_config(cur_attr, base=base):\n log_config(cur_attr, f_out=f_out, prefix=prefix+'\\t', base=base)\n print('\\n', file=f_out, flush=True)" }, { "identifier": "print_mem", "path": "utils/logger.py", "snippet": "def print_mem(prefix, gpu=True, 
check_time=False, check_sys=False, **kwargs):\n sep = '\\n\\t' if any([gpu, check_time]) else ' '\n lines = [prefix, 'Mem Comsumption: %.2f GB' % (print_mem.process.memory_info()[0] / float(2**30))]\n if check_sys:\n sysmem = psutil.virtual_memory()\n lines += [f'Mem in sys: avail {sysmem.available / 2**30:.2f} / total {sysmem.total / 2**30:.2f}']\n if gpu:\n try:\n gpu_mem = get_gpu_mem()\n lines += [f'Availabel Mem of each GPU: {gpu_mem}']\n except FileNotFoundError:\n pass\n except sp.CalledProcessError:\n pass\n if check_time:\n cur_t = time.time()\n if not hasattr(print_mem, 't_start'):\n print_mem.t_start = cur_t\n print_mem.t = cur_t\n else:\n gap = int(cur_t-print_mem.t)\n cum = int(cur_t-print_mem.t_start)\n lines += [f'time used [gap/cum] : {gap // 60}min {gap % 60}s / {cum // 60}min {cum % 60}s']\n print_mem.t = cur_t\n print(sep.join(lines), **kwargs)" }, { "identifier": "redirect_io", "path": "utils/logger.py", "snippet": "class redirect_io(object):\n def __init__(self, log_file, debug):\n self.log_file = log_file\n self.debug = debug\n def __enter__(self):\n if self.debug or not self.log_file:\n return\n self.log_file = open(self.log_file, 'w') if isinstance(self.log_file, str) else self.log_file\n self.stdout, self.stderr = sys.stdout, sys.stderr\n sys.stdout = sys.stderr = self.log_file\n \n def __exit__(self, exc_type, exc_value, tb):\n if self.debug or not self.log_file:\n return\n if sys.exc_info() != (None, None, None):\n traceback.print_exc()\n self.log_file.close()\n sys.stdout, sys.stderr = self.stdout, self.stderr" }, { "identifier": "get_snap", "path": "config/utils.py", "snippet": "def get_snap(saving_path, step='last', snap_prefix='snap'):\n # get the best of running val (done in training)\n snap_path = os.path.join(saving_path, 'snapshots') if not saving_path.endswith('snapshots') else saving_path\n snap_steps = [f.split('.')[0].split('-')[-1] for f in os.listdir(snap_path) if f.startswith(snap_prefix)]\n if step == 'last':\n snap_steps = sorted([int(s) for s in snap_steps if s.isdigit()]) + sorted([s for s in snap_steps if not s.isdigit()])\n chosen_step = snap_steps[-1] # last saved snap (best val estimation)\n chosen_snap = os.path.join(snap_path, f'snap-{chosen_step}')\n else:\n assert isinstance(step, int) or step.isdigit() or step == 'best', f'not supported step = {step}'\n step = str(step)\n chosen_snap = None\n if step in snap_steps:\n chosen_snap = os.path.join(snap_path, f'snap-{step}')\n else:\n raise ValueError(f'step={step} not in {snap_steps} (path={snap_path})')\n return chosen_snap" }, { "identifier": "ModelTester", "path": "utils/tester.py", "snippet": "class ModelTester:\n\n # Initiation methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def __init__(self, config, verbose=True):\n self.config = config\n self.verbose = verbose\n\n self.save_extra = {} # for saving with extra ops\n\n if config.dataset in ['S3DIS', 'ScanNet', 'SensatUrban']:\n self.val_running_vote = self.val_running_vote_seg\n self.val_vote = self.val_vote_seg\n self.test_vote = self.test_vote_seg\n else:\n raise NotImplementedError(f'not supported dataset: {config.dataset}')\n\n def init_pointcloud_log(self, dataset, split, d, dtype=np.float32, init_fn=np.zeros):\n shape = lambda l: [l, d] if d else [l] # d - size of last dimension => each point d-dim [N, d] (d = None to have [N])\n log = [init_fn(shape=shape(t.data.shape[0]), dtype=dtype) for t in dataset.input_trees[split]]\n return log\n\n def 
initialize(self, ops, dataset, model, split):\n # initialize cum_dict & ops\n config = self.config\n ncls = config.num_classes\n\n run_ops = {k: ops['result_dict'][k] for k in ['inputs', 'seg']} # assumes per-gpu rst - support multi-gpu\n cum_dict = {\n 'prob': self.init_pointcloud_log(dataset, split, ncls)\n }\n\n extra_ops = [k for k in config.extra_ops.split('-') if k]\n extra_ops_solved = extra_ops.copy()\n for k in extra_ops:\n if k in ['prob', 'conf']:\n continue\n else:\n raise ValueError(f'not supported extra ops k = {k} from {config.extra_ops}')\n\n return run_ops, cum_dict, extra_ops_solved\n\n # Val methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def val_running_vote_seg(self, sess, ops, dataset, model, validation_probs, epoch=1):\n \"\"\"\n One epoch validating - running voting used during training, main task results only\n \"\"\"\n\n val_smooth = 0.95 # Choose validation smoothing parameter (0 for no smothing, 0.99 for big smoothing)\n\n result_dict = {k: ops['result_dict'][k] for k in ['inputs', 'seg']} # result dict for seg\n val_ops = {'loss_dict': ops['loss_dict'], 'result_dict': result_dict}\n feed_dict = {ops['is_training']: False}\n\n # Initialise iterator\n sess.run(ops['val_init_op'])\n\n ep = 0\n loss_meter = {k: AverageMeter() for k in val_ops['loss_dict']} if 'loss_dict' in val_ops else{}\n cum_dict = {\n 'conf': 0, # conf from current validation\n 'prob': validation_probs, # accumulating probs\n }\n while ep < epoch:\n try:\n rst = sess.run(val_ops, feed_dict=feed_dict)\n\n loss_dict = rst['loss_dict'] if 'loss_dict' in rst else {}\n cur_rst = rst['result_dict'] # per-gpu result\n\n for k, v in loss_dict.items():\n loss_meter[k].update(v)\n\n # Stack all validation predictions for each class separately - iterate over each gpu & cloud\n self.cumulate_probs(dataset, model, cur_rst, cum_dict, task='seg', smooth=val_smooth)\n\n except tf.errors.OutOfRangeError:\n ep += 1\n pass\n\n if loss_meter:\n print(f'val loss avg:', ' '.join([f'{loss_n} = {meter.avg:.3f}' for loss_n, meter in loss_meter.items()]))\n\n label_to_idx = dataset.label_to_idx\n proportions = dataset.val_proportions\n cur_m = metrics_from_confusions(cum_dict['conf'], proportions=proportions) # use sampled pred-label of current epoch\n vote_m = metrics_from_result(validation_probs, dataset.input_labels['validation'], dataset.num_classes, label_to_idx=label_to_idx, proportions=proportions) # use the accumulated per-point voting\n\n print(f'metrics - current {cur_m}\\n'\n f' - accumulated {vote_m}', flush=True)\n return cur_m\n\n\n def val_vote_seg(self, sess, ops, dataset, model, num_votes=20):\n \"\"\"\n Voting validating\n \"\"\"\n\n feed_dict = {ops['is_training']: False}\n\n # Smoothing parameter for votes\n val_smooth = 0.95\n\n # Initialise iterator with val data\n sess.run(ops['val_init_op'])\n\n # Initiate global prediction over val clouds\n label_to_idx = dataset.label_to_idx\n proportions = dataset.val_proportions\n val_ops, cum_dict, extra_ops = self.initialize(ops, dataset, model, 'validation')\n val_probs = cum_dict['prob']\n\n vote_ind = 0\n last_min = -0.5\n if self.config.debug:\n print_dict(val_ops, head='val_vote_seg - val_ops')\n while last_min < num_votes:\n try:\n cur_rst = sess.run(val_ops, feed_dict=feed_dict)\n # Stack all validation predictions for each class separately - iterate over each gpu & cloud\n self.cumulate_probs(dataset, model, cur_rst, cum_dict, task='seg', smooth=val_smooth)\n\n except 
tf.errors.OutOfRangeError:\n new_min = np.min(dataset.min_potentials['validation'])\n if self.verbose:\n print(f'Step {vote_ind:3d}, end. Min potential = {new_min:.1f}', flush=True)\n if last_min + 1 < new_min:\n # Update last_min\n last_min += 1\n\n if self.verbose > 1:\n # Show vote results on subcloud (match original label to valid) => not the good values here\n vote_m = metrics_from_result(val_probs, dataset.input_labels['validation'], dataset.num_classes, label_to_idx=label_to_idx, proportions=proportions)\n print('==> Confusion on sub clouds: ', vote_m.scalar_str)\n\n if self.verbose > 1 and int(np.ceil(new_min)) % 2 == 0:\n # Project predictions\n vote_m = metrics_from_result(val_probs, dataset.validation_labels, dataset.num_classes, label_to_idx=label_to_idx, projections=dataset.validation_proj)\n print('==> Confusion on full clouds:', vote_m)\n\n sess.run(ops['val_init_op'])\n vote_ind += 1\n\n vote_m = metrics_from_result(val_probs, dataset.input_labels['validation'], dataset.num_classes, label_to_idx=label_to_idx, proportions=proportions)\n print('==> Confusion on sub clouds - final: ', vote_m.scalar_str)\n\n # Project predictions\n print('==> Confusion on full clouds - final:')\n vote_m = metrics_from_result(val_probs, dataset.validation_labels, dataset.num_classes, label_to_idx=label_to_idx, projections=dataset.validation_proj)\n vote_m.print()\n print('\\nfinished\\n', flush=True)\n\n return\n\n\n # Test methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def test_classification(self, model, dataset, num_votes=100):\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n # Number of classes predicted by the model\n nc_model = config.num_classes\n\n # Initiate votes\n average_probs = np.zeros((len(dataset.input_labels['test']), nc_model))\n average_counts = np.zeros((len(dataset.input_labels['test']), nc_model))\n\n mean_dt = np.zeros(2)\n last_display = time.time()\n while np.min(average_counts) < num_votes:\n\n # Run model on all test examples\n # ******************************\n\n # Initiate result containers\n probs = []\n targets = []\n obj_inds = []\n count = 0\n\n while True:\n try:\n\n # Run one step of the model\n t = [time.time()]\n ops = (self.prob_logits, model.labels, model.inputs['object_inds'])\n prob, labels, inds = self.sess.run(ops, {model.dropout_prob: 1.0})\n t += [time.time()]\n\n # Get probs and labels\n probs += [prob]\n targets += [labels]\n obj_inds += [inds]\n count += prob.shape[0]\n\n # Average timing\n t += [time.time()]\n mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))\n\n # Display\n if (t[-1] - last_display) > self.gap_display:\n last_display = t[-1]\n message = 'Vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})'\n print(message.format(np.min(average_counts),\n 100 * count / dataset.num_test,\n 1000 * (mean_dt[0]),\n 1000 * (mean_dt[1])))\n\n except tf.errors.OutOfRangeError:\n break\n\n # Average votes\n # *************\n\n # Stack all validation predictions\n probs = np.vstack(probs)\n targets = np.hstack(targets)\n obj_inds = np.hstack(obj_inds)\n\n if np.any(dataset.input_labels['test'][obj_inds] != targets):\n raise ValueError('wrong object indices')\n\n # Compute incremental average (predictions are always ordered)\n average_counts[obj_inds] += 1\n average_probs[obj_inds] += (probs - average_probs[obj_inds]) / (average_counts[obj_inds])\n\n # Save/Display temporary results\n # ******************************\n\n 
test_labels = np.array(dataset.label_values)\n\n # Compute classification results\n C1 = confusion_matrix(dataset.input_labels['test'],\n np.argmax(average_probs, axis=1),\n test_labels)\n\n ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)\n print('Test Accuracy = {:.1f}%'.format(ACC))\n\n s = ''\n for cc in C1:\n for c in cc:\n s += '{:d} '.format(c)\n s += '\\n'\n print(s)\n\n\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n return\n\n def test_multi_segmentation(self, model, dataset, num_votes=100, num_saves=10):\n\n ##################\n # Pre-computations\n ##################\n\n print('Preparing test structures')\n t1 = time.time()\n\n # Collect original test file names\n original_path = join(dataset.path, 'test_ply')\n test_names = [f[:-4] for f in listdir(original_path) if f[-4:] == '.ply']\n test_names = np.sort(test_names)\n\n original_labels = []\n original_points = []\n projection_inds = []\n for i, cloud_name in enumerate(test_names):\n\n # Read data in ply file\n data = read_ply(join(original_path, cloud_name + '.ply'))\n points = np.vstack((data['x'], -data['z'], data['y'])).T\n original_labels += [data['label'] - 1]\n original_points += [points]\n\n # Create tree structure to compute neighbors\n tree = KDTree(dataset.input_points['test'][i])\n projection_inds += [np.squeeze(tree.query(points, return_distance=False))]\n\n t2 = time.time()\n print('Done in {:.1f} s\\n'.format(t2 - t1))\n\n ##########\n # Initiate\n ##########\n\n # Test saving path\n if config.save_test:\n test_path = join(model.saving_path, 'test')\n if not exists(test_path):\n makedirs(test_path)\n else:\n test_path = None\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n # Initiate result containers\n average_predictions = [np.zeros((1, 1), dtype=np.float32) for _ in test_names]\n\n #####################\n # Network predictions\n #####################\n\n mean_dt = np.zeros(2)\n last_display = time.time()\n for v in range(num_votes):\n\n # Run model on all test examples\n # ******************************\n\n # Initiate result containers\n all_predictions = []\n all_obj_inds = []\n\n while True:\n try:\n\n # Run one step of the model\n t = [time.time()]\n ops = (self.prob_logits,\n model.labels,\n model.inputs['super_labels'],\n model.inputs['object_inds'],\n model.inputs['in_batches'])\n preds, labels, obj_labels, o_inds, batches = self.sess.run(ops, {model.dropout_prob: 1.0})\n t += [time.time()]\n\n # Stack all predictions for each class separately\n max_ind = np.max(batches)\n for b_i, b in enumerate(batches):\n\n # Eliminate shadow indices\n b = b[b < max_ind - 0.5]\n\n # Get prediction (only for the concerned parts)\n obj = obj_labels[b[0]]\n predictions = preds[b][:, :config.num_classes[obj]]\n\n # Stack all results\n all_predictions += [predictions]\n all_obj_inds += [o_inds[b_i]]\n\n # Average timing\n t += [time.time()]\n mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))\n\n # Display\n if (t[-1] - last_display) > self.gap_display:\n last_display = t[-1]\n message = 'Vote {:d} : {:.1f}% (timings : {:4.2f} {:4.2f})'\n print(message.format(v,\n 100 * len(all_predictions) / dataset.num_test,\n 1000 * (mean_dt[0]),\n 1000 * (mean_dt[1])))\n\n except tf.errors.OutOfRangeError:\n break\n\n # Project predictions on original point clouds\n # ********************************************\n\n print('\\nGetting test confusions')\n t1 = time.time()\n\n for i, probs in enumerate(all_predictions):\n\n # Interpolate 
prediction from current positions to original points\n obj_i = all_obj_inds[i]\n proj_predictions = probs[projection_inds[obj_i]]\n\n # Average prediction across votes\n average_predictions[obj_i] = average_predictions[obj_i] + \\\n (proj_predictions - average_predictions[obj_i]) / (v + 1)\n\n Confs = []\n for obj_i, avg_probs in enumerate(average_predictions):\n\n # Compute confusion matrices\n parts = [j for j in range(avg_probs.shape[1])]\n Confs += [confusion_matrix(original_labels[obj_i], np.argmax(avg_probs, axis=1), parts)]\n\n\n t2 = time.time()\n print('Done in {:.1f} s\\n'.format(t2 - t1))\n\n # Save the best/worst segmentations per class\n # *******************************************\n\n print('Saving test examples')\n t1 = time.time()\n\n # Regroup confusions per object class\n Confs = np.array(Confs)\n obj_mIoUs = []\n for l in dataset.label_values:\n\n # Get confusions for this object\n obj_inds = np.where(dataset.input_labels['test'] == l)[0]\n obj_confs = np.stack(Confs[obj_inds])\n\n # Get IoU\n obj_IoUs = IoU_from_confusions(obj_confs)\n obj_mIoUs += [np.mean(obj_IoUs, axis=-1)]\n\n # Get X best and worst prediction\n order = np.argsort(obj_mIoUs[-1])\n worst_inds = obj_inds[order[:num_saves]]\n best_inds = obj_inds[order[:-num_saves-1:-1]]\n worst_IoUs = obj_IoUs[order[:num_saves]]\n best_IoUs = obj_IoUs[order[:-num_saves-1:-1]]\n\n # Save the names in a file\n if config.save_test:\n obj_path = join(test_path, dataset.label_to_names[l])\n if not exists(obj_path):\n makedirs(obj_path)\n worst_file = join(obj_path, 'worst_inds.txt')\n best_file = join(obj_path, 'best_inds.txt')\n with open(worst_file, \"w\") as text_file:\n for w_i, w_IoUs in zip(worst_inds, worst_IoUs):\n text_file.write('{:d} {:s} :'.format(w_i, test_names[w_i]))\n for IoU in w_IoUs:\n text_file.write(' {:.1f}'.format(100*IoU))\n text_file.write('\\n')\n\n with open(best_file, \"w\") as text_file:\n for b_i, b_IoUs in zip(best_inds, best_IoUs):\n text_file.write('{:d} {:s} :'.format(b_i, test_names[b_i]))\n for IoU in b_IoUs:\n text_file.write(' {:.1f}'.format(100*IoU))\n text_file.write('\\n')\n\n # Save the clouds\n for i, w_i in enumerate(worst_inds):\n filename = join(obj_path, 'worst_{:02d}.ply'.format(i+1))\n preds = np.argmax(average_predictions[w_i], axis=1).astype(np.int32)\n write_ply(filename,\n [original_points[w_i], original_labels[w_i], preds],\n ['x', 'y', 'z', 'gt', 'pre'])\n\n for i, b_i in enumerate(best_inds):\n filename = join(obj_path, 'best_{:02d}.ply'.format(i+1))\n preds = np.argmax(average_predictions[b_i], axis=1).astype(np.int32)\n write_ply(filename,\n [original_points[b_i], original_labels[b_i], preds],\n ['x', 'y', 'z', 'gt', 'pre'])\n\n t2 = time.time()\n print('Done in {:.1f} s\\n'.format(t2 - t1))\n\n # Display results\n # ***************\n\n objs_average = [np.mean(mIoUs) for mIoUs in obj_mIoUs]\n instance_average = np.mean(np.hstack(obj_mIoUs))\n class_average = np.mean(objs_average)\n\n print('Objs | Inst | Air Bag Cap Car Cha Ear Gui Kni Lam Lap Mot Mug Pis Roc Ska Tab')\n print('-----|------|--------------------------------------------------------------------------------')\n\n s = '{:4.1f} | {:4.1f} | '.format(100 * class_average, 100 * instance_average)\n for AmIoU in objs_average:\n s += '{:4.1f} '.format(100 * AmIoU)\n print(s + '\\n')\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n return\n\n def test_vote_seg(self, sess, ops, dataset, model, num_votes=20, test_path=None, make_zip=True):\n\n config = self.config\n assert 
os.path.isdir(config.saving_path), f'not a dir: {config.saving_path}'\n if test_path is None:\n test_path = os.path.join(config.saving_path, 'test')\n os.makedirs(test_path, exist_ok=True)\n\n options = None # tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = None # tf.RunMetadata()\n feed_dict = {ops['is_training']: False}\n\n # Smoothing parameter for votes\n test_smooth = 0.98\n\n # Initialise iterator with test data\n sess.run(ops['test_init_op'])\n\n # Initiate global prediction over val clouds\n test_ops, cum_dict, extra_ops = self.initialize(ops, dataset, model, 'test')\n test_probs = cum_dict['prob']\n\n vote_ind = 0\n last_min = -0.5 \n if config.num_votes:\n num_votes = config.num_votes\n while last_min < num_votes:\n try:\n cur_rst = sess.run(test_ops, feed_dict=feed_dict, options=options, run_metadata=run_metadata)\n # Stack all test predictions for each class separately - iterate over each gpu & cloud\n self.cumulate_probs(dataset, model, cur_rst, cum_dict, task='seg', smooth=test_smooth)\n\n except tf.errors.OutOfRangeError:\n # NOTE: need to check\n new_min = np.min(dataset.min_potentials['test'])\n if self.verbose:\n print(f'Step {vote_ind:3d}, end. Min potential = {new_min:.1f}', flush=True)\n\n if last_min + 1 < new_min:\n # Update last_min\n last_min += 1\n\n # if int(last_min) > 0 and int(last_min) // 5 == 0: # periodic test results\n # self.project_test_predictions(dataset, test_path)\n\n sess.run(ops['test_init_op'])\n vote_ind += 1\n\n if self.verbose:\n new_min = np.min(dataset.min_potentials['test'])\n print(f'Step {vote_ind:3d}, end. Min potential = {new_min:.1f}', flush=True)\n\n self.project_test_predictions(dataset, test_probs, test_path)\n print('\\nfinished\\n', flush=True)\n\n if make_zip:\n zip_name = test_path.split(os.sep) # cfg name / Log_* / test_*\n zip_name = '_'.join([i for i in ['test', *zip_name[-3:-1], zip_name[-1][len('test'):].strip('_')] if i])\n # include test_* dir (except Semantic3D, ScanNet)\n j = 'j' if config.dataset in ['ScanNet', 'Semantic3D', 'SensatUrban'] else ''\n os.system(f'cd {os.path.dirname(test_path)}; zip -rmTq{j} {zip_name}.zip {test_path.split(os.sep)[-1]}/*') # -m to move, -j junk file, -T test integrity, -q quiet\n os.system(f'rm -r {test_path}')\n return\n\n def project_test_predictions(self, dataset, test_probs, test_path):\n\n # Project predictions\n t1 = time.time()\n files = dataset.test_files\n ignored_inds = None\n if hasattr(dataset, 'ignored_labels_test'):\n ignored_inds = dataset.label_to_idx[[l for l in dataset.ignored_labels_test if l not in dataset.ignored_labels]].astype(int)\n\n config = self.config\n if config.save_test:\n pred_path = os.sep.join([*test_path.split(os.sep)[:-1], test_path.split(os.sep)[-1].replace('test', 'predictions')]) # model pred\n os.makedirs(pred_path, exist_ok=True)\n\n for i_test, file_path in enumerate(files):\n\n # Reproject probs\n probs = test_probs[i_test][dataset.test_proj[i_test], :]\n\n # Remove invalid classes in test\n if ignored_inds is not None:\n probs[:, ignored_inds] = 0\n\n # Get the predicted labels\n preds = dataset.idx_to_label[np.argmax(probs, axis=-1)]\n\n # Save plys - predictions & probs\n cloud_name = file_path.split('/')[-1]\n if config.save_test:\n points = dataset.load_evaluation_points(file_path) # test original points\n pots = dataset.potentials['test'][i_test][dataset.test_proj[i_test]] # project potentials on original points\n test_name = os.path.join(pred_path, cloud_name)\n prob_names = 
['_'.join(dataset.label_to_names[label].split()) for label in dataset.label_values if label not in dataset.ignored_labels]\n write_ply(test_name,\n [points, preds, pots, probs],\n ['x', 'y', 'z', 'preds', 'pots'] + prob_names)\n\n # Save ascii preds - submission files\n if config.dataset == 'Semantic3D':\n ascii_name = os.path.join(test_path, dataset.ascii_files[cloud_name])\n np.savetxt(ascii_name, preds, fmt='%d')\n elif config.dataset == 'SensatUrban':\n ascii_name = os.path.join(test_path, f'{cloud_name[:-4]}.label')\n preds.astype(np.uint8).tofile(ascii_name)\n else:\n ascii_name = os.path.join(test_path, cloud_name[:-4] + '.txt')\n np.savetxt(ascii_name, preds, fmt='%d')\n\n t2 = time.time()\n if self.verbose:\n print('\\nReproject Vote in {:.1f}s\\n'.format(t2-t1))\n\n\n # Utilities\n # ------------------------------------------------------------------------------------------------------------------\n\n def cumulate_probs(self, dataset, model, rst, cum_dict, task, smooth):\n # cum_dict - {cum_dict name : {args : rst_dict}}\n\n # iterate over gpu\n for gpu_i, cloud_inds in enumerate(rst['inputs']['cloud_inds']):\n point_inds = rst['inputs']['point_inds'][gpu_i]\n\n b_start = 0\n # iterate over clouds\n for b_i, c_i in enumerate(cloud_inds): # [B]\n if 'batches_len' in rst['inputs']: # [BxN] - stacked\n b_len = rst['inputs']['batches_len'][gpu_i][0][b_i] # npoints in cloud\n b_i = np.arange(b_start, b_start + b_len)\n b_start += b_len\n else: # [B, N] - batched\n pass\n inds = point_inds[b_i] # input point inds\n\n probs = rst[task]['probs'][gpu_i][b_i]\n labels = rst[task]['labels'][gpu_i][b_i]\n if np.all(labels == -1):\n # c_pts = np.array(dataset.input_trees['validation'][c_i].data, copy=False)[inds].mean(axis=0)\n # unique_l_cnt = np.unique(dataset.input_labels['validation'][c_i][inds], return_counts=True)\n # raise ValueError(f'all invalid labels found in cumulate_prob: cloud_inds={c_i}, center_pts={c_pts}'\n # f'input_labels & counts - {unique_l_cnt}')\n continue\n if 'conf' in cum_dict:\n cur_conf = confusion_matrix(labels, np.argmax(probs, axis=-1).astype(np.int), labels=np.arange(dataset.num_classes))\n cum_dict['conf'] += cur_conf\n if 'prob' in cum_dict:\n cum_dict['prob'][c_i][inds] = smooth * cum_dict['prob'][c_i][inds] + (1 - smooth) * probs\n if 'feature' in cum_dict:\n cum_dict['feature'][c_i][inds] = smooth * cum_dict['feature'][c_i][inds] + (1 - smooth) * rst[task]['latent'][gpu_i][b_i]\n\n def _search_func(self, k_r, cloud_idx, split, dataset, neighbor_dict, verbose=True): # create tf_ops of generating neighbor_idx & get result\n if cloud_idx in neighbor_dict[k_r]:\n return neighbor_dict[k_r][cloud_idx]\n\n config = self.config\n points = np.array(dataset.input_trees[split][cloud_idx].data, copy=False) # [N, 3]\n\n from ops import get_tf_func\n func = get_tf_func(config.search, verbose=verbose)\n\n if config.search in ['knn']:\n tf_ops = tf.squeeze(func(points[None, ...], points[None, ...], k_r), axis=0)\n elif config.search in ['radius']:\n tf_ops = func(points, points, [len(points)], [len(points)], k_r)\n # if hasattr(dataset, 'neighborhood_limits'):\n # print('neighborhood_limits', dataset.neighborhood_limits[0])\n # tf_ops = tf_ops[..., :dataset.neighborhood_limits[0]]\n else:\n raise\n\n if verbose:\n print_mem(f'k = {k_r} - start', check_time=True, check_sys=True, flush=True)\n with tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}, allow_soft_placement=True)) as s:\n neighbor_idx = s.run(tf_ops)\n if verbose:\n print_mem(f'neighbor_idx 
{neighbor_idx.shape}', check_time=True, check_sys=True, flush=True)\n\n neighbor_dict[k_r][cloud_idx] = neighbor_idx # neighbor idx - np arr\n return neighbor_idx" }, { "identifier": "ModelTrainer", "path": "utils/trainer.py", "snippet": "class ModelTrainer:\n \"\"\"\n get & train the model (potential multi-gpu training)\n \"\"\"\n\n def __init__(self, config, verbose=True):\n self.config = config\n self.verbose = verbose\n self.tester = ModelTester(config, verbose=False)\n\n def add_summary(self, model):\n with tf.variable_scope('summary'):\n summary = model.summary\n log_content = self.config.log_content\n\n if 'var' in log_content:\n summary['per_log'] += [tf.summary.histogram(v.name, v) for g, v in gvs]\n if 'gard' in log_content:\n summary['per_log'] += [tf.summary.histogram(f'{v.name}_grad', g) for g, v in gvs]\n\n sum_levels = ['per_step', 'per_log', 'per_epoch']\n assert all([k in sum_levels for k in summary.keys()]), f'undesired keys in summary dict: {str(summary.keys())}'\n for i in range(len(sum_levels)):\n summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else [tf.no_op]\n self.summary = summary\n return\n\n # Training main method\n # ------------------------------------------------------------------------------------------------------------------\n\n def train(self):\n config = self.config\n with tf.Graph().as_default(): # use one graph\n\n # prepare compute graph\n g = GraphBuilder(config, verbose=self.verbose)\n ops, sess, grads, saver = g.ops, g.sess, g.grads, g.saver\n model, dataset = g.model, g.dataset\n self.model = model\n\n # printing model parameters\n if self.verbose:\n print('\\n --------- printing grads {')\n re_list = ['.*bias:.*', '.*batch_normalization.*'] # skipping\n print_table([(v.name, g) for g, v in grads if not any([bool(re.fullmatch(expr, v.name)) for expr in re_list])], prefix='\\t')\n print('} --------- printing grads')\n # all ops in graph\n print('\\n --------- all ops {')\n re_list = ['optimizer.*', 'gpu_.*', 'gradients.*', 'save.*'] # '.*/batch_normalization/.*', '.*/bias:.*' # skipping\n for n in tf.get_default_graph().as_graph_def().node:\n if any([bool(re.fullmatch(expr, n.name)) for expr in re_list]): continue\n print('\\t', n.name)\n print('} --------- all ops')\n # model params\n all_params_size = sum([np.prod(v.shape) for _, v in grads])\n # all_params_size = tf.reduce_sum([tf.reduce_prod(v.shape) for _, v in grads])\n # all_params_size = sess.run(all_params_size)\n print(f'==> Model have {all_params_size} total Params', flush=True)\n\n # init sess\n sess.run(tf.global_variables_initializer())\n if self.config.model_path:\n except_list = [f'.*{n}.*' for n in self.config.exclude_vars] + ['optimizer.*'] if not self.config.continue_training else []\n g.restore(sess, self.config.model_path, except_list=except_list)\n print(f'Model restored -- {self.config.model_path}')\n\n # running voting - used throughout the training process (accumulated voting)\n validation_probs = self.tester.init_pointcloud_log(dataset, 'validation', config.num_classes)\n\n # train func\n if config.debug_nan:\n self.train_one_epoch = self.train_one_epoch_debug\n\n # train\n metric_best = None\n # save_snap = [i for i in range(1, config.max_epoch + 1) if i % config.save_freq == 0]\n lr_scheduler = LrScheduler(config)\n snap_path = os.path.join(config.saving_path, config.snap_dir, config.snap_prefix)\n for epoch in range(1, config.max_epoch + 1):\n print(f'\\n****EPOCH {epoch}****')\n lr = lr_scheduler.learning_rate\n\n tic1 = time.time()\n step = 
self.train_one_epoch(sess, ops, epoch, lr, g=g)\n tic2 = time.time()\n print(f'total time: {(tic2 - tic1)/60:.1f}min, learning rate = {lr:.7f}', flush=True)\n\n if epoch % config.val_freq == 0:\n metric = self.tester.val_running_vote(sess, ops, dataset, model, validation_probs) # running voting\n if metric_best is None or metric > metric_best: # keep the best val\n metric_best = metric\n saver.save(sess, snap_path + '-best')\n print('best saved')\n # if config.save_best:\n # saver.save(sess, snap_path + '-best')\n # if config.save_best == 'center':\n # epoch_start = max(epoch // config.save_freq - config.max_to_keep // 2, 1)\n # save_snap = [i * config.save_freq for i in range(epoch_start, epoch_start + config.max_to_keep + 1)]\n # save_snap = [i for i in save_snap if i != epoch]\n # if epoch in save_snap:\n if config.save_freq and epoch % config.save_freq == 0:\n saver.save(sess, snap_path, global_step=epoch)\n lr_scheduler.step(epoch=1, step=step)\n\n # val & save last model if missed\n if epoch % config.val_freq != 0:\n self.tester.val_running_vote(sess, ops, dataset, model, validation_probs)\n if config.save_freq and epoch % config.save_freq != 0:\n saver.save(sess, snap_path, global_step=epoch)\n print('\\nfinished\\n', flush=True)\n return\n\n def train_one_epoch(self, sess, ops, epoch, lr, g=None):\n \"\"\"\n One epoch training\n \"\"\"\n config = self.config\n\n is_training = True\n batch_time = AverageMeter()\n loss_meter = {k: AverageMeter() for k in ops['loss_dict']}\n\n train_ops = {'train_op': ops['train_op'], 'loss_dict': ops['loss_dict']}\n feed_dict = {ops['is_training']: is_training, ops['learning_rate']: lr}\n sess.run(ops['train_init_op'])\n\n batch_idx = 0\n end = time.time()\n while True:\n try:\n rst = sess.run(train_ops, feed_dict=feed_dict)\n\n if (batch_idx + 1) % config.update_freq == 0:\n for k, v in rst['loss_dict'].items():\n loss_meter[k].update(v)\n batch_time.update(time.time() - end)\n end = time.time()\n\n if (batch_idx + 1) % config.print_freq == 0:\n loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()])\n print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True)\n\n batch_idx += 1\n except tf.errors.OutOfRangeError:\n break\n return batch_idx\n\n # Debug methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def show_memory_usage(self, batch_to_feed):\n\n for l in range(self.config.num_layers):\n neighb_size = list(batch_to_feed[self.in_neighbors_f32[l]].shape)\n dist_size = neighb_size + [self.config.num_kernel_points, 3]\n dist_memory = np.prod(dist_size) * 4 * 1e-9\n in_feature_size = neighb_size + [self.config.first_features_dim * 2**l]\n in_feature_memory = np.prod(in_feature_size) * 4 * 1e-9\n out_feature_size = [neighb_size[0], self.config.num_kernel_points, self.config.first_features_dim * 2**(l+1)]\n out_feature_memory = np.prod(out_feature_size) * 4 * 1e-9\n\n print('Layer {:d} => {:.1f}GB {:.1f}GB {:.1f}GB'.format(l,\n dist_memory,\n in_feature_memory,\n out_feature_memory))\n print('************************************')\n\n def train_one_epoch_debug(self, sess, ops, epoch, lr, g=None):\n \"\"\"\n One epoch training\n \"\"\"\n config = self.config\n\n is_training = True\n batch_time = AverageMeter()\n loss_meter = {k: AverageMeter() for k in ops['loss_dict']}\n\n inputs = self.model.inputs\n inputs_flat = {k: v for k, v in inputs.items() if not isinstance(v, (list, dict))}\n train_ops = {'train_op': 
ops['train_op'], 'loss_dict': ops['loss_dict'], 'inputs': inputs_flat, 'result_dict': ops['result_dict']}\n assert_ops = inputs['assert_ops'] if 'assert_ops' in inputs and len(inputs['assert_ops']) > 0 else []\n feed_dict = {ops['is_training']: is_training, ops['learning_rate']: lr}\n sess.run(ops['train_init_op'])\n\n if config.debug_grads:\n assert g is not None # [(g, v), ...]\n train_ops['grads'] = g.grads\n\n batch_idx = 0\n end = time.time()\n while True:\n try:\n with tf.control_dependencies(assert_ops):\n rst = sess.run(train_ops, feed_dict=feed_dict)\n\n # NaN appears\n if config.debug_grads:\n self.debug_grads_nan(sess, inputs, train_ops, rst)\n\n if any([np.isnan(v) for v in rst['loss_dict'].values()]):\n self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict'])\n raise ArithmeticError(f'NaN encountered !!!')\n\n if (batch_idx + 1) % config.update_freq == 0:\n for k, v in rst['loss_dict'].items():\n loss_meter[k].update(v)\n batch_time.update(time.time() - end)\n end = time.time()\n\n if (batch_idx + 1) % config.print_freq == 0:\n loss_str = ' '.join([f'{n}={meter.avg:<6.2f}' for n, meter in loss_meter.items()])\n print(f'Step {batch_idx+1:08d} ' + loss_str + f' ---{batch_time.avg:5.3f} s/batch', flush=True)\n\n batch_idx += 1\n except tf.errors.OutOfRangeError:\n break\n return batch_idx\n\n def debug_grads_nan(self, sess, inputs, ops, rst):\n grads = ops['grads']\n grads_v = rst['grads']\n\n nan_grads = [(g, v, g_val, v_val) for (g, v), (g_val, v_val) in zip(grads, grads_v) if np.isnan(g_val).any() or np.isnan(v_val).any()]\n if not nan_grads:\n return\n\n lines = []\n for g, v, g_val, v_val in nan_grads:\n g_nan = 100 * np.sum(np.isnan(g_val)) / np.prod(g_val.shape)\n v_nan = 100 * np.sum(np.isnan(v_val)) / np.prod(v_val.shape)\n lines.append([v.name, g, '-', v_val.shape, f'/ {v_nan:.1f}', 'val nan', g_val.shape, f'/ {g_nan:.1f}', 'grad nan'])\n print_table(lines)\n\n self.debug_nan(sess, rst['inputs'], rst['result_dict'], rst['loss_dict'])\n raise ArithmeticError(f'NaN encountered in grads checking !!!')\n return\n\n def debug_nan(self, sess, inputs, result_dict, loss_dict):\n \"\"\"\n NaN happened, find where\n \"\"\"\n\n print('\\n\\n------------------------ NaN DEBUG ------------------------\\n')\n\n print('loss_dict :')\n print('*******************\\n')\n print_dict(loss_dict)\n\n # Then print a list of the trainable variables and if they have nan\n print('List of variables :')\n print('*******************\\n')\n all_vars = sess.run(tf.global_variables())\n for v, value in zip(tf.global_variables(), all_vars):\n nan_percentage = 100 * np.sum(np.isnan(value)) / np.prod(value.shape)\n line = v.name + (f'\\t => {nan_percentage:.1f}% of values are NaN' if np.isnan(value).any() else '')\n print(line)\n\n print('Inputs :')\n print('********')\n\n #Print inputs\n for layer in range(self.config.num_layers):\n\n print(f'Layer : {layer}')\n\n points = inputs['points'][layer]\n neighbors = inputs['neighbors'][layer]\n pools = inputs['pools'][layer]\n upsamples = inputs['upsamples'][layer]\n\n nan_percentage = 100 * np.sum(np.isnan(points)) / np.prod(points.shape)\n print('Points =>', points.shape, '{:.1f}% NaN'.format(nan_percentage))\n nan_percentage = 100 * np.sum(np.isnan(neighbors)) / np.prod(neighbors.shape)\n print('neighbors =>', neighbors.shape, '{:.1f}% NaN'.format(nan_percentage))\n nan_percentage = 100 * np.sum(np.isnan(pools)) / np.prod(pools.shape)\n print('pools =>', pools.shape, '{:.1f}% NaN'.format(nan_percentage))\n nan_percentage = 100 * 
np.sum(np.isnan(upsamples)) / np.prod(upsamples.shape)\n print('upsamples =>', upsamples.shape, '{:.1f}% NaN'.format(nan_percentage))\n\n features = inputs['features']\n nan_percentage = 100 * np.sum(np.isnan(features)) / np.prod(features.shape)\n print('features =>', features.shape, '{:.1f}% NaN'.format(nan_percentage))\n batch_weights = inputs['batch_weights']\n in_batches = inputs['in_batches']\n max_b = np.max(in_batches)\n print(in_batches.shape)\n in_b_sizes = np.sum(in_batches < max_b - 0.5, axis=-1)\n print('in_batch_sizes =>', in_b_sizes)\n out_batches = inputs['out_batches']\n max_b = np.max(out_batches)\n print(out_batches.shape)\n out_b_sizes = np.sum(out_batches < max_b - 0.5, axis=-1)\n print('out_batch_sizes =>', out_b_sizes)\n point_labels = inputs['point_labels']\n if self.config.dataset.startswith('ShapeNetPart_multi'):\n object_labels = inputs['object_labels']\n nan_percentage = 100 * np.sum(np.isnan(object_labels)) / np.prod(object_labels.shape)\n print('object_labels =>', object_labels.shape, '{:.1f}% NaN'.format(nan_percentage))\n augment_scales = inputs['augment_scales']\n augment_rotations = inputs['augment_rotations']\n\n print('\\npoolings and upsamples nums :\\n')\n\n #Print inputs\n for layer in range(self.config.num_layers):\n\n print(f'\\nLayer : {layer}')\n\n neighbors = inputs['neighbors'][layer]\n pools = inputs['pools'][layer]\n upsamples = inputs['upsamples'][layer]\n\n max_n = np.max(neighbors)\n nums = np.sum(neighbors < max_n - 0.5, axis=-1)\n print('min neighbors =>', np.min(nums))\n\n max_n = np.max(pools)\n nums = np.sum(pools < max_n - 0.5, axis=-1)\n print('min pools =>', np.min(nums))\n\n max_n = np.max(upsamples)\n nums = np.sum(upsamples < max_n - 0.5, axis=-1)\n print('min upsamples =>', np.min(nums))\n\n\n print('\\n--- NaN Debug Print End ---\\n\\n', flush=True)\n\n # # save everything to reproduce error - inputs/logits\n # file1 = os.path.join(self.config.saving_path, 'all_debug_inputs.pkl')\n # with open(file1, 'wb') as f1:\n # pickle.dump(inputs, f1)\n # file1 = os.path.join(self.config.saving_path, 'all_debug_logits.pkl')\n # with open(file1, 'wb') as f1:\n # pickle.dump(logits, f1)\n\n\n time.sleep(0.5)" }, { "identifier": "GraphBuilder", "path": "utils/tf_graph_builder.py", "snippet": "class GraphBuilder(object):\n\n def __init__(self, config, graph=None, verbose=True):\n \"\"\"\n get the full compute graph including dataset, model inference, loss, optimizer, lr scheduler and required ops\n \"\"\"\n\n if graph is not None: # if graph specified\n with graph.as_default():\n return self.__init__(config, None, verbose)\n\n if isinstance(config.rand_seed, int): # set seed\n tf.set_random_seed(config.rand_seed)\n np.random.seed(config.rand_seed)\n if verbose:\n print(f'==> np random seed = {np.random.get_state()[1][0]}')\n\n # model & dataset fn\n self.get_dataset = getattr(datasets, f'{config.dataset}Dataset') # datasets.[name]Dataset\n self.get_model = models.get_model\n # if config.distribute == 'tf_device': # full compute graph (handle devices & platforms)\n # self.build = self.build_devices\n # else:\n # raise NotImplementedError(f'not supported type of distributing graphs: config.distribute={config.distribute}')\n\n # Get dataset\n if verbose:\n print('==> Preparing datasets...')\n dataset = self.get_dataset(config, verbose)\n dataset.initialize(verbose)\n if verbose:\n print('==> setting dataset info:')\n print_dict(dataset.info, prefix='\\t')\n print_mem('>>> dataset built')\n config.update(dataset.info)\n\n # placeholder\n 
is_training = tf.placeholder(tf.bool, shape=())\n learning_rate = tf.placeholder(tf.float32, shape=(), name='learning_rate')\n # learning_rate = tf.get_variable('learning_rate', [], initializer=tf.constant_initializer(float('nan')), trainable=False)\n\n # # build model\n # grads, total_loss_dict, total_result_dict, model = self.build(dataset, is_training, config, verbose=verbose)\n\n # -------------------------------------------\n # Get model and loss on multiple GPU devices\n # -------------------------------------------\n # Allocating variables on CPU first will greatly accelerate multi-gpu training.\n # Ref: https://github.com/kuza55/keras-extras/issues/21\n flat_inputs = dataset.flat_inputs\n if config.cpu_variables:\n self.get_model(flat_inputs[0], is_training, config=config, verbose=verbose)\n tower_grads = []\n total_losses = []\n total_result = []\n for igpu in range(config.gpu_num):\n with tf.variable_scope(tf.get_variable_scope(), reuse=True if config.cpu_variables else tf.AUTO_REUSE):\n name_scope = f'gpu_{igpu}' if config.cpu_variables or igpu > 0 else ''\n verbose = not bool(name_scope)\n with tf.device(f'/gpu:{igpu}'), tf.name_scope(name_scope) as scope:\n flat_inputs_i = flat_inputs[igpu]\n model = self.get_model(flat_inputs_i, is_training, config=config, scope=scope, verbose=verbose) # inference model\n\n # collect per-gpu info\n result_dict = model.get_result() # inference result\n total_result.append(result_dict)\n\n loss_dict = model.get_loss() # loss\n total_losses.append(loss_dict)\n\n var_list = tf.trainable_variables() # vars & grads\n var_list = self.collect_vars(var_list, include_k=config.vars_train, except_k=config.vars_freeze)\n grads = tf.gradients(loss_dict['loss'], var_list, colocate_gradients_with_ops=config.colocate_gradients_with_ops) # normally, should NOT co-locate\n grads = list(zip(grads, var_list))\n tower_grads.append(grads)\n total_inputs = dict_list(flat_inputs)\n total_result = dict_list(total_result)\n total_losses = dict_list(total_losses)\n\n # average losses from multiple GPUs\n with tf.variable_scope('losses'):\n total_losses = {k: tf.reduce_mean(v, name=k) if len(v) > 1 else v[0] for k, v in total_losses.items()}\n\n # average grad\n with tf.variable_scope('gradients'):\n # [(gradient, variable), ...] 
- gradient averaged over gpu towers (if >1)\n grads = average_gradients(tower_grads, grad_norm=config.grad_norm, raise_on_none=config.grad_raise_none, grad_reduce=config.grad_reduce)\n\n # setup optimizer\n with tf.variable_scope('optimizer'):\n if config.optimizer == 'sgd':\n optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=config.momentum)\n elif config.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate)\n elif config.optimizer == 'adamW':\n from utils.AdamWOptimizer import AdamWeightDecayOptimizer\n optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=config.weight_decay, exclude_from_weight_decay=[\"bias\"])\n\n # if config.mixed_precision:\n # optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)\n\n # momentume as update ops\n update_ops = self.get_momentum_update(model, config, total_inputs, total_result)\n for ops in update_ops: # add to collection\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ops)\n\n # train op\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.apply_gradients(grads)\n # train_op = optimizer.apply_gradients(grads)\n # train_op = tf.group([train_op, update_ops])\n\n # saver\n save_vars = None\n if config.save_compact:\n save_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model')\n if isinstance(config.save_compact, bool):\n pass\n elif isinstance(config.save_compact, str) and config.save_compact == 'trained':\n vars_grads = {v: g for g, v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model')}\n save_vars = [v for v in save_vars if v in vars_grads and vars_grads[v] is not None] # save only trained\n else:\n raise ValueError(f'not support save_compact={config.save_compact}')\n saver = tf.train.Saver(save_vars, max_to_keep=int(config.max_to_keep))\n\n # summary\n with tf.variable_scope('summary'):\n if config.summary and isinstance(config.summary, str):\n inputs = model.inputs\n if 'summary' not in inputs:\n inputs['summary'] = defaultdict(lambda: [])\n if config.summary == 'loss':\n inputs['summary']['per_step'] += [tf.summary.scalar(k, v) for k, v in total_losses.items()]\n # log grads - debug use\n # inputs = model.inputs\n # inputs['summary'] = defaultdict(lambda: [])\n # from models.utils import tf_Print\n # for i, (g, v) in enumerate(grads):\n # if config.summary:\n # inputs['summary']['per_step'] += [tf.summary.histogram(f'{v.name}/v', v)]\n # inputs['summary']['per_step'] += [tf.summary.histogram(f'{v.name}/g', g)]\n # if v.name in [\n # 'model/resnet_scene_segmentation_head/up_conv3/weights:0',\n # 'model/resnet_scene_segmentation_head/segmentation_head/weights:0',\n # ]:\n # print(f'print grad - {v.name}')\n # g = tf_Print(g, [f'grads - {v.name}', g])\n # grads[i] = (g, v)\n # input('\\nprint above grads')\n # summary - merge\n summary_dict = {} # {level : merged op}\n if config.summary:\n sum_levels = ['per_step', 'per_log', 'per_epoch']\n summary_ops = model.inputs['summary'] if 'summary' in model.inputs else {k: [] for k in sum_levels}\n assert all([k in sum_levels for k in summary_ops]), f'undesired keys in summary ops: {summary_ops.keys()}'\n for i in range(len(sum_levels)):\n lv = sum_levels[-i - 1]\n ops = sum([summary_ops[k] for k in sum_levels[:len(sum_levels)-i]], [])\n summary_dict[lv] = tf.summary.merge(ops) if len(ops) > 0 else tf.no_op()\n\n # Create a session\n cProto = tf.ConfigProto()\n if config.gpu_allow_growth:\n 
cProto.gpu_options.allow_growth = True\n if config.debug_single:\n cProto.device_count['CPU'] = 1\n # config.intra_op_parallelism_threads = config.inter_op_parallelism_threads = psutil.cpu_count(logical=False) # set to num of physical (default to logical) cpu cores\n cProto.allow_soft_placement = bool(config.allow_soft_placement) or not bool(config.gpu_devices) # if specified or cpu-only\n cProto.log_device_placement = False\n sess = tf.Session(config=cProto)\n\n ops = {\n 'train_init_op': dataset.train_init_op,\n 'val_init_op': dataset.val_init_op,\n 'test_init_op': dataset.test_init_op,\n\n 'train_op': train_op,\n 'is_training': is_training,\n 'learning_rate': learning_rate,\n\n 'inputs': dict(total_inputs),\n 'loss_dict': dict(total_losses),\n 'result_dict': dict(total_result),\n 'summary_dict': dict(summary_dict),\n }\n if verbose:\n print_mem('>>> model built')\n print('\\n -------- inputs {')\n print_dict(model.inputs, prefix='\\t')\n print('} --------- inputs')\n print('\\n -------- loss_dict {')\n print_dict(total_losses, prefix='\\t')\n print('} --------- loss_dict')\n print('\\n -------- result_dict {')\n print_dict(total_result, prefix='\\t')\n print('} --------- result_dict')\n\n self.ops = ops\n self.sess = sess\n self.grads = grads\n self.saver = saver\n\n self.model = model\n self.dataset = dataset\n\n # -------------------------------------------\n # Other utils & interfaces\n # -------------------------------------------\n\n def collect_vars(self, var_list, include_k=[], except_k=[], match='search'):\n # collect specified vars - default to all vars\n var_collect = []\n match_func = getattr(re, match)\n include_k = [include_k] if include_k and isinstance(include_k, str) else include_k\n except_k = [include_k] if except_k and isinstance(except_k, str) else except_k\n for v in var_list:\n if include_k and not any(match_func(k, v.name) for k in include_k):\n continue\n if except_k and any(match_func(k, v.name) for k in except_k):\n continue\n var_collect.append(v)\n return var_collect\n\n def get_momentum_update(self, model, config, total_inputs, total_result):\n # collect update ops for momentum update\n update_ops = []\n\n # update ops - momentum dict\n # NOTE - can be done in per-head fashion\n # => check only sepcial 'momentum_update_stage'\n for head_n, head_d in total_result.items():\n if 'momentum_dict' not in head_d or 'momentum_dict' not in total_inputs: continue\n if head_n not in total_inputs['momentum_dict']:\n raise KeyError(f'building momentum cycle for head {head_n}: missing tensor for momentum dict')\n head_cfg = model.head_dict['config'][head_n]\n\n # per-device input/output\n mom_in = total_inputs['momentum_dict'][head_n] # {k : [v = tensor]}, with inputs['momentum_dict'] = {head_n: {k : placeholder/vars}}\n mom_out = head_d['momentum_dict'] # {k: [v = tensor]}\n for k, v_out in mom_out.items():\n v_in = mom_in[k]\n\n # collect for update\n mom_avg = head_cfg.momentum_update\n mom_avg = float(mom_avg) if isinstance(mom_avg, (str, int)) else mom_avg # can be variable\n with tf.variable_scope(f'mom_dict_update/{head_n}/{k}'):\n if head_cfg.momentum_update_stage == 'glb_avg':\n # average over devices\n v_out = tf.reduce_mean(tf.stack(v_out, axis=0), axis=0)\n v_out = [v_in[i] * mom_avg + v_out * (1 - mom_avg) for i in range(config.gpu_num)]\n\n elif head_cfg.momentum_update_stage == 'glb_sum':\n # sum over devices\n v_out = tf.reduce_sum(tf.stack(v_out, axis=0), axis=0)\n v_out = [v_in[i] * mom_avg + v_out * (1 - mom_avg) for i in range(config.gpu_num)]\n\n # 
create update ops\n for igpu in range(config.gpu_num): # assign to each device input\n with tf.variable_scope(f'gpu_{igpu}/mom_dict_update/{head_n}/{k}', reuse=True):\n update_ops += [tf.assign(v_in[igpu], v_out[igpu])]\n\n return update_ops\n\n\n\n def restore(self, *args, **kwargs):\n argspec = inspect.getfullargspec(restore)\n kwargs.update(zip(argspec.args, args))\n kw_self = {'session': self.sess} # , 'saver': self.saver\n for k, v in kw_self.items():\n if k not in kwargs:\n kwargs[k] = v\n return restore(**kwargs)\n\n def close(self):\n self.sess.close()\n tf.reset_default_graph()" } ]
import numpy as np
import multiprocessing as mp
import os, sys, time, glob, pickle, psutil, argparse, importlib
import tensorflow as tf
import models, datasets
import utils.memory_saving_gradients
from config import load_config, log_config
from utils.logger import print_mem, redirect_io
from config.utils import get_snap
from utils.tester import ModelTester
from utils.trainer import ModelTrainer
from utils.tf_graph_builder import GraphBuilder
16,504
# Common libs
sys.path.insert(0, f'{os.getcwd()}')

# Custom libs


def get_last_train(cfg):
    saving_path = sorted(glob.glob(f'results/{cfg.dataset.lower()}/{cfg.name}/*'))
    return saving_path[-1] if saving_path else None


parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cfg_path', type=str, help='config path')
parser.add_argument('--gpus', type=str, default=None, help='the number/ID of GPU(s) to use [default: 1], 0 to use cpu only')
parser.add_argument('--mode', type=str, default=None, help='options: train, val, test')
parser.add_argument('--seed', type=int, default=None, dest='rand_seed', help='random seed for use')
parser.add_argument('--data_path', type=str, default=None, help='path to dataset dir = data_path/dataset_name')
parser.add_argument('--model_path', type=str, default=None, help='pretrained model path')
parser.add_argument('--saving_path', type=str, default=None, help='specified saving path')
parser.add_argument('--num_votes', type=float, default=None, help='least num of votes of each point (default to 30)')
parser.add_argument('--num_threads', type=lambda n: mp.cpu_count() if n == 'a' else int(n) if n else None, default=None, help='the number of cpu to use for data loading')
parser.add_argument('--set', type=str, help='external source to set the config - str of dict / yaml file')
parser.add_argument('--debug', action='store_true', help='debug mode')
FLAGS = parser.parse_args()
# sys.argv = sys.argv[:1]  # clean extra argv

# ---------------------------------------------------------------------------- #
# solve env & cfg
# ---------------------------------------------------------------------------- #
assert FLAGS.cfg_path is not None

# load config - config path: config(dir).dataset_name(py).config_name(py_class)
# Common libs
sys.path.insert(0, f'{os.getcwd()}')

# Custom libs


def get_last_train(cfg):
    saving_path = sorted(glob.glob(f'results/{cfg.dataset.lower()}/{cfg.name}/*'))
    return saving_path[-1] if saving_path else None


parser = argparse.ArgumentParser()
parser.add_argument('-c', '--cfg_path', type=str, help='config path')
parser.add_argument('--gpus', type=str, default=None, help='the number/ID of GPU(s) to use [default: 1], 0 to use cpu only')
parser.add_argument('--mode', type=str, default=None, help='options: train, val, test')
parser.add_argument('--seed', type=int, default=None, dest='rand_seed', help='random seed for use')
parser.add_argument('--data_path', type=str, default=None, help='path to dataset dir = data_path/dataset_name')
parser.add_argument('--model_path', type=str, default=None, help='pretrained model path')
parser.add_argument('--saving_path', type=str, default=None, help='specified saving path')
parser.add_argument('--num_votes', type=float, default=None, help='least num of votes of each point (default to 30)')
parser.add_argument('--num_threads', type=lambda n: mp.cpu_count() if n == 'a' else int(n) if n else None, default=None, help='the number of cpu to use for data loading')
parser.add_argument('--set', type=str, help='external source to set the config - str of dict / yaml file')
parser.add_argument('--debug', action='store_true', help='debug mode')
FLAGS = parser.parse_args()
# sys.argv = sys.argv[:1]  # clean extra argv

# ---------------------------------------------------------------------------- #
# solve env & cfg
# ---------------------------------------------------------------------------- #
assert FLAGS.cfg_path is not None

# load config - config path: config(dir).dataset_name(py).config_name(py_class)
cfg = load_config(cfg_path=FLAGS.cfg_path)
0
2023-10-13 08:03:07+00:00
24k
bilibini/Lovely_Image_Downloader
dist/py/Python38/site-packages/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "dist/py/Python38/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n _container: typing.MutableMapping[str, list[str]]\n\n def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):\n super().__init__()\n self._container = {} # 'dict' is insert-ordered in Python 3.7+\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key: str, val: str) -> None:\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n self._container[key.lower()] = [key, val]\n\n def __getitem__(self, key: str) -> str:\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key: str) -> None:\n del self._container[key.lower()]\n\n def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n return key.lower() in self._container\n return False\n\n def setdefault(self, key: str, default: str = \"\") -> str:\n return super().setdefault(key, default)\n\n def __eq__(self, other: object) -> bool:\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return False\n else:\n other_as_http_header_dict = type(self)(maybe_constructable)\n\n return {k.lower(): v for k, v in self.itermerged()} == {\n k.lower(): v for k, v in other_as_http_header_dict.itermerged()\n }\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __len__(self) -> int:\n return len(self._container)\n\n def __iter__(self) -> typing.Iterator[str]:\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def discard(self, key: str) -> None:\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key: str, val: str, *, combine: bool = False) -> None:\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n If this is called with combine=True, instead of adding a new header value\n as a distinct item during iteration, this will instead append the value to\n any existing header value with a comma. 
If no existing header value exists\n for the key, then the value will simply be added, ignoring the combine parameter.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n >>> list(headers.items())\n [('foo', 'bar'), ('foo', 'baz')]\n >>> headers.add('foo', 'quz', combine=True)\n >>> list(headers.items())\n [('foo', 'bar, baz, quz')]\n \"\"\"\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n # if there are values here, then there is at least the initial\n # key/value pair\n assert len(vals) >= 2\n if combine:\n vals[-1] = vals[-1] + \", \" + val\n else:\n vals.append(val)\n\n def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n f\"extend() takes at most 1 positional arguments ({len(args)} given)\"\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, typing.Mapping):\n for key, val in other.items():\n self.add(key, val)\n elif isinstance(other, typing.Iterable):\n other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)\n for key, value in other:\n self.add(key, value)\n elif hasattr(other, \"keys\") and hasattr(other, \"__getitem__\"):\n # THIS IS NOT A TYPESAFE BRANCH\n # In this branch, the object has a `keys` attr but is not a Mapping or any of\n # the other types indicated in the method signature. We do some stuff with\n # it as though it partially implements the Mapping interface, but we're not\n # doing that stuff safely AT ALL.\n for key in other.keys():\n self.add(key, other[key])\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n @typing.overload\n def getlist(self, key: str) -> list[str]:\n ...\n\n @typing.overload\n def getlist(self, key: str, default: _DT) -> list[str] | _DT:\n ...\n\n def getlist(\n self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed\n ) -> list[str] | _DT:\n \"\"\"Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is _Sentinel.not_passed:\n # _DT is unbound; empty list is instance of List[str]\n return []\n # _DT is bound; default is instance of _DT\n return default\n else:\n # _DT may or may not be bound; vals[1:] is instance of List[str], which\n # meets our external interface requirement of `Union[List[str], _DT]`.\n return vals[1:]\n\n def _prepare_for_method_change(self) -> Self:\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({dict(self.itermerged())})\"\n\n def _copy_from(self, other: HTTPHeaderDict) -> None:\n for key in other:\n val = other.getlist(key)\n self._container[key.lower()] = [key, *val]\n\n def copy(self) -> HTTPHeaderDict:\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]\n return HTTPHeaderDictItemView(self)\n\n def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:\n if header_name in self:\n return potential_value in self._container[header_name.lower()][1:]\n return False\n\n def __ior__(self, other: object) -> HTTPHeaderDict:\n # Supports extending a header dict in-place using operator |=\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n self.extend(maybe_constructable)\n return self\n\n def __or__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator |\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = self.copy()\n result.extend(maybe_constructable)\n return result\n\n def __ror__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator | when other is on left side\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = type(self)(maybe_constructable)\n result.extend(self)\n return result" }, { "identifier": "RecentlyUsedContainer", "path": "dist/py/Python38/site-packages/urllib3/_collections.py", "snippet": "class RecentlyUsedContainer(typing.Generic[_KT, _VT], typing.MutableMapping[_KT, _VT]):\n \"\"\"\n Provides a 
thread-safe dict-like container which maintains up to\n ``maxsize`` keys while throwing away the least-recently-used keys beyond\n ``maxsize``.\n\n :param maxsize:\n Maximum number of recent elements to retain.\n\n :param dispose_func:\n Every time an item is evicted from the container,\n ``dispose_func(value)`` is called. Callback which will get called\n \"\"\"\n\n _container: typing.OrderedDict[_KT, _VT]\n _maxsize: int\n dispose_func: typing.Callable[[_VT], None] | None\n lock: RLock\n\n def __init__(\n self,\n maxsize: int = 10,\n dispose_func: typing.Callable[[_VT], None] | None = None,\n ) -> None:\n super().__init__()\n self._maxsize = maxsize\n self.dispose_func = dispose_func\n self._container = OrderedDict()\n self.lock = RLock()\n\n def __getitem__(self, key: _KT) -> _VT:\n # Re-insert the item, moving it to the end of the eviction line.\n with self.lock:\n item = self._container.pop(key)\n self._container[key] = item\n return item\n\n def __setitem__(self, key: _KT, value: _VT) -> None:\n evicted_item = None\n with self.lock:\n # Possibly evict the existing value of 'key'\n try:\n # If the key exists, we'll overwrite it, which won't change the\n # size of the pool. Because accessing a key should move it to\n # the end of the eviction line, we pop it out first.\n evicted_item = key, self._container.pop(key)\n self._container[key] = value\n except KeyError:\n # When the key does not exist, we insert the value first so that\n # evicting works in all cases, including when self._maxsize is 0\n self._container[key] = value\n if len(self._container) > self._maxsize:\n # If we didn't evict an existing value, and we've hit our maximum\n # size, then we have to evict the least recently used item from\n # the beginning of the container.\n evicted_item = self._container.popitem(last=False)\n\n # After releasing the lock on the pool, dispose of any evicted value.\n if evicted_item is not None and self.dispose_func:\n _, evicted_value = evicted_item\n self.dispose_func(evicted_value)\n\n def __delitem__(self, key: _KT) -> None:\n with self.lock:\n value = self._container.pop(key)\n\n if self.dispose_func:\n self.dispose_func(value)\n\n def __len__(self) -> int:\n with self.lock:\n return len(self._container)\n\n def __iter__(self) -> typing.NoReturn:\n raise NotImplementedError(\n \"Iteration over this class is unlikely to be threadsafe.\"\n )\n\n def clear(self) -> None:\n with self.lock:\n # Copy pointers to all values, then wipe the mapping\n values = list(self._container.values())\n self._container.clear()\n\n if self.dispose_func:\n for value in values:\n self.dispose_func(value)\n\n def keys(self) -> set[_KT]: # type: ignore[override]\n with self.lock:\n return set(self._container.keys())" }, { "identifier": "RequestMethods", "path": "dist/py/Python38/site-packages/urllib3/_request_methods.py", "snippet": "class RequestMethods:\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of 
request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers: typing.Mapping[str, str] | None = None) -> None:\n self.headers = headers or {}\n\n def urlopen(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **kw: typing.Any,\n ) -> BaseHTTPResponse: # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n json: typing.Any | None = None,\n **urlopen_kw: typing.Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if json is not None and body is not None:\n raise TypeError(\n \"request got values for both 'body' and 'json' parameters which are mutually exclusive\"\n )\n\n if json is not None:\n if headers is None:\n headers = self.headers.copy() # type: ignore\n if not (\"content-type\" in map(str.lower, headers.keys())):\n headers[\"Content-Type\"] = \"application/json\" # type: ignore\n\n body = _json.dumps(json, separators=(\",\", \":\"), ensure_ascii=False).encode(\n \"utf-8\"\n )\n\n if body is not None:\n urlopen_kw[\"body\"] = body\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method,\n url,\n fields=fields, # type: ignore[arg-type]\n headers=headers,\n **urlopen_kw,\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(\n self,\n method: str,\n url: str,\n fields: _TYPE_ENCODE_URL_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method: str,\n url: str,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. 
This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": HTTPHeaderDict(headers)}\n body: bytes | str\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields), # type: ignore[arg-type]\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"].setdefault(\"Content-Type\", content_type)\n\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "ProxyConfig", "path": "dist/py/Python38/site-packages/urllib3/connection.py", "snippet": " class BaseSSLError(BaseException): # type: ignore[no-redef]\nclass HTTPConnection(_HTTPConnection):\nclass HTTPSConnection(HTTPConnection):\nclass _WrappedAndVerifiedSocket(typing.NamedTuple):\nclass DummyConnection:\nRECENT_DATE = datetime.date(2022, 1, 1)\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n_HAS_SYS_AUDIT = hasattr(sys, \"audit\")\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n ) -> None:\n def host(self) -> str:\n def host(self, value: str) -> None:\n def _new_conn(self) -> socket.socket:\n def set_tunnel(\n self,\n host: str,\n port: int | None = None,\n headers: typing.Mapping[str, str] | None = None,\n scheme: str = \"http\",\n ) -> None:\n def connect(self) -> None:\n def is_closed(self) -> bool:\n def is_connected(self) -> bool:\n def has_connected_to_proxy(self) -> bool:\n def close(self) -> None:\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n def putheader(self, header: 
str, *values: str) -> None:\n def request( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n *,\n chunked: bool = False,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> None:\n def request_chunked(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n ) -> None:\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n cert_reqs: int | str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n server_hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_certs: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n ssl_version: int | str | None = None, # Deprecated\n cert_file: str | None = None,\n key_file: str | None = None,\n key_password: str | None = None,\n ) -> None:\n def set_cert(\n self,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ) -> None:\n def connect(self) -> None:\n def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:\ndef _ssl_wrap_socket_and_match_hostname(\n sock: socket.socket,\n *,\n cert_reqs: None | str | int,\n ssl_version: None | str | int,\n ssl_minimum_version: int | None,\n ssl_maximum_version: int | None,\n cert_file: str | None,\n key_file: str | None,\n key_password: str | None,\n ca_certs: str | None,\n ca_cert_dir: str | None,\n ca_cert_data: None | str | bytes,\n assert_hostname: None | str | Literal[False],\n assert_fingerprint: str | None,\n server_hostname: str | None,\n ssl_context: ssl.SSLContext | None,\n tls_in_tls: bool = False,\n) -> _WrappedAndVerifiedSocket:\ndef _match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n asserted_hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\ndef _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError:\ndef _get_default_user_agent() -> str:\ndef _url_from_connection(\n conn: HTTPConnection | HTTPSConnection, path: str | None = None\n) -> str:" }, { "identifier": "HTTPConnectionPool", "path": "dist/py/Python38/site-packages/urllib3/connectionpool.py", "snippet": "_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None]\nclass ConnectionPool:\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\nclass HTTPSConnectionPool(HTTPConnectionPool):\n def __init__(self, host: str, port: int | None = None) -> None:\n def __str__(self) -> str:\n def __enter__(self: _SelfT) -> _SelfT:\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> Literal[False]:\n def 
close(self) -> None:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n _proxy_config: ProxyConfig | None = None,\n **conn_kw: typing.Any,\n ):\n def _new_conn(self) -> BaseHTTPConnection:\n def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection:\n def _put_conn(self, conn: BaseHTTPConnection | None) -> None:\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\n def _prepare_proxy(self, conn: BaseHTTPConnection) -> None:\n def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:\n def _raise_timeout(\n self,\n err: BaseSSLError | OSError | SocketTimeout,\n url: str,\n timeout_value: _TYPE_TIMEOUT | None,\n ) -> None:\n def _make_request(\n self,\n conn: BaseHTTPConnection,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | None = None,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n chunked: bool = False,\n response_conn: BaseHTTPConnection | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> BaseHTTPResponse:\n def close(self) -> None:\n def is_same_host(self, url: str) -> bool:\n def urlopen( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n redirect: bool = True,\n assert_same_host: bool = True,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n pool_timeout: int | None = None,\n release_conn: bool | None = None,\n chunked: bool = False,\n body_pos: _TYPE_BODY_POSITION | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n **response_kw: typing.Any,\n ) -> BaseHTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n ssl_version: int | str | None = None,\n ssl_minimum_version: ssl.TLSVersion | None = None,\n ssl_maximum_version: ssl.TLSVersion | None = None,\n assert_hostname: str | Literal[False] | None = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n **conn_kw: typing.Any,\n ) -> None:\n def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]\n def _new_conn(self) -> BaseHTTPSConnection:\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\ndef connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool:\ndef _normalize_host(host: None, scheme: str | None) -> None:\ndef _normalize_host(host: str, scheme: str | None) -> str:\ndef _normalize_host(host: str | None, scheme: str | None) -> str | None:\ndef _url_from_pool(\n pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None\n) -> str:\ndef _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None:" }, { "identifier": "LocationValueError", "path": 
"dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"" }, { "identifier": "MaxRetryError", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param str url: The requested Url\n :param reason: The underlying error\n :type reason: :class:`Exception`\n\n \"\"\"\n\n def __init__(\n self, pool: ConnectionPool, url: str, reason: Exception | None = None\n ) -> None:\n self.reason = reason\n\n message = f\"Max retries exceeded with url: {url} (Caused by {reason!r})\"\n\n super().__init__(pool, url, message)" }, { "identifier": "ProxySchemeUnknown", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):\n \"\"\"ProxyManager does not support the supplied scheme\"\"\"\n\n # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.\n\n def __init__(self, scheme: str | None) -> None:\n # 'localhost' is here because our URL parser parses\n # localhost:8080 -> scheme=localhost, remove if we fix this.\n if scheme == \"localhost\":\n scheme = None\n if scheme is None:\n message = \"Proxy URL had no scheme, should start with http:// or https://\"\n else:\n message = f\"Proxy URL had unsupported scheme {scheme}, should use http:// or https://\"\n super().__init__(message)" }, { "identifier": "URLSchemeUnknown", "path": "dist/py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class URLSchemeUnknown(LocationValueError):\n \"\"\"Raised when a URL input has an unsupported scheme.\"\"\"\n\n def __init__(self, scheme: str):\n message = f\"Not supported URL scheme {scheme}\"\n super().__init__(message)\n\n self.scheme = scheme" }, { "identifier": "BaseHTTPResponse", "path": "dist/py/Python38/site-packages/urllib3/response.py", "snippet": "class BaseHTTPResponse(io.IOBase):\n CONTENT_DECODERS = [\"gzip\", \"deflate\"]\n if brotli is not None:\n CONTENT_DECODERS += [\"br\"]\n if zstd is not None:\n CONTENT_DECODERS += [\"zstd\"]\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n DECODER_ERROR_CLASSES: tuple[type[Exception], ...] 
= (IOError, zlib.error)\n if brotli is not None:\n DECODER_ERROR_CLASSES += (brotli.error,)\n\n if zstd is not None:\n DECODER_ERROR_CLASSES += (zstd.ZstdError,)\n\n def __init__(\n self,\n *,\n headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,\n status: int,\n version: int,\n reason: str | None,\n decode_content: bool,\n request_url: str | None,\n retries: Retry | None = None,\n ) -> None:\n if isinstance(headers, HTTPHeaderDict):\n self.headers = headers\n else:\n self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]\n self.status = status\n self.version = version\n self.reason = reason\n self.decode_content = decode_content\n self._has_decoded_content = False\n self._request_url: str | None = request_url\n self.retries = retries\n\n self.chunked = False\n tr_enc = self.headers.get(\"transfer-encoding\", \"\").lower()\n # Don't incur the penalty of creating a list and then discarding it\n encodings = (enc.strip() for enc in tr_enc.split(\",\"))\n if \"chunked\" in encodings:\n self.chunked = True\n\n self._decoder: ContentDecoder | None = None\n\n def get_redirect_location(self) -> str | None | Literal[False]:\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. ``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get(\"location\")\n return False\n\n @property\n def data(self) -> bytes:\n raise NotImplementedError()\n\n def json(self) -> typing.Any:\n \"\"\"\n Parses the body of the HTTP response as JSON.\n\n To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.\n\n This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.\n\n Read more :ref:`here <json>`.\n \"\"\"\n data = self.data.decode(\"utf-8\")\n return _json.loads(data)\n\n @property\n def url(self) -> str | None:\n raise NotImplementedError()\n\n @url.setter\n def url(self, url: str | None) -> None:\n raise NotImplementedError()\n\n @property\n def connection(self) -> HTTPConnection | None:\n raise NotImplementedError()\n\n @property\n def retries(self) -> Retry | None:\n return self._retries\n\n @retries.setter\n def retries(self, retries: Retry | None) -> None:\n # Override the request_url if retries has a redirect location.\n if retries is not None and retries.history:\n self.url = retries.history[-1].redirect_location\n self._retries = retries\n\n def stream(\n self, amt: int | None = 2**16, decode_content: bool | None = None\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def read(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n cache_content: bool = False,\n ) -> bytes:\n raise NotImplementedError()\n\n def read_chunked(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def release_conn(self) -> None:\n raise NotImplementedError()\n\n def drain_conn(self) -> None:\n raise NotImplementedError()\n\n def close(self) -> None:\n raise NotImplementedError()\n\n def _init_decoder(self) -> None:\n \"\"\"\n Set-up the _decoder attribute if necessary.\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 7230\n # Section 3.2\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = 
_get_decoder(content_encoding)\n elif \",\" in content_encoding:\n encodings = [\n e.strip()\n for e in content_encoding.split(\",\")\n if e.strip() in self.CONTENT_DECODERS\n ]\n if encodings:\n self._decoder = _get_decoder(content_encoding)\n\n def _decode(\n self, data: bytes, decode_content: bool | None, flush_decoder: bool\n ) -> bytes:\n \"\"\"\n Decode the data passed in and potentially flush the decoder.\n \"\"\"\n if not decode_content:\n if self._has_decoded_content:\n raise RuntimeError(\n \"Calling read(decode_content=False) is not supported after \"\n \"read(decode_content=True) was called.\"\n )\n return data\n\n try:\n if self._decoder:\n data = self._decoder.decompress(data)\n self._has_decoded_content = True\n except self.DECODER_ERROR_CLASSES as e:\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding,\n e,\n ) from e\n if flush_decoder:\n data += self._flush_decoder()\n\n return data\n\n def _flush_decoder(self) -> bytes:\n \"\"\"\n Flushes the decoder. Should only be called if the decoder is actually\n being used.\n \"\"\"\n if self._decoder:\n return self._decoder.decompress(b\"\") + self._decoder.flush()\n return b\"\"\n\n # Compatibility methods for `io` module\n def readinto(self, b: bytearray) -> int:\n temp = self.read(len(b))\n if len(temp) == 0:\n return 0\n else:\n b[: len(temp)] = temp\n return len(temp)\n\n # Compatibility methods for http.client.HTTPResponse\n def getheaders(self) -> HTTPHeaderDict:\n warnings.warn(\n \"HTTPResponse.getheaders() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers\n\n def getheader(self, name: str, default: str | None = None) -> str | None:\n warnings.warn(\n \"HTTPResponse.getheader() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers.get(name, default)\n\n # Compatibility method for http.cookiejar\n def info(self) -> HTTPHeaderDict:\n return self.headers\n\n def geturl(self) -> str | None:\n return self.url" }, { "identifier": "_TYPE_SOCKET_OPTIONS", "path": "dist/py/Python38/site-packages/urllib3/util/connection.py", "snippet": "_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]]" }, { "identifier": "connection_requires_http_tunnel", "path": "dist/py/Python38/site-packages/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n destination_scheme: str | None = None,\n) -> bool:\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. 
(i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "Retry", "path": "dist/py/Python38/site-packages/urllib3/util/retry.py", "snippet": "class Retry:\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool:\n\n .. code-block:: python\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request(\"GET\", \"https://example.com/\")\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=Retry(10))\n\n Retries can be disabled by passing ``False``:\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param Collection allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). 
See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``None`` value to retry on any verb.\n\n :param Collection status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of previous retries}))\n\n seconds. If `backoff_jitter` is non-zero, this sleep is extended by::\n\n random.uniform(0, {backoff jitter})\n\n seconds. For example, if the backoff_factor is 0.1, then :func:`Retry.sleep` will\n sleep for [0.0s, 0.2s, 0.4s, 0.8s, ...] between retries. No backoff will ever\n be longer than `backoff_max`.\n\n By default, backoff is disabled (factor set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param Collection remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Default maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n # Backward compatibility; assigned outside of the class.\n DEFAULT: typing.ClassVar[Retry]\n\n def __init__(\n self,\n total: bool | int | None = 10,\n connect: int | None = None,\n read: int | None = None,\n redirect: bool | int | None = None,\n status: int | None = None,\n other: int | None = None,\n allowed_methods: typing.Collection[str] | None = DEFAULT_ALLOWED_METHODS,\n status_forcelist: typing.Collection[int] | None = None,\n backoff_factor: float = 0,\n backoff_max: float = DEFAULT_BACKOFF_MAX,\n raise_on_redirect: bool = True,\n raise_on_status: bool = True,\n history: tuple[RequestHistory, ...] 
| None = None,\n respect_retry_after_header: bool = True,\n remove_headers_on_redirect: typing.Collection[\n str\n ] = DEFAULT_REMOVE_HEADERS_ON_REDIRECT,\n backoff_jitter: float = 0.0,\n ) -> None:\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.backoff_max = backoff_max\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or ()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n h.lower() for h in remove_headers_on_redirect\n )\n self.backoff_jitter = backoff_jitter\n\n def new(self, **kw: typing.Any) -> Retry:\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n allowed_methods=self.allowed_methods,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n backoff_max=self.backoff_max,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n backoff_jitter=self.backoff_jitter,\n )\n\n params.update(kw)\n return type(self)(**params) # type: ignore[arg-type]\n\n @classmethod\n def from_int(\n cls,\n retries: Retry | bool | int | None,\n redirect: bool | int | None = True,\n default: Retry | bool | int | None = None,\n ) -> Retry:\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self) -> float:\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n if self.backoff_jitter != 0.0:\n backoff_value += random.random() * self.backoff_jitter\n return float(max(0, min(self.backoff_max, backoff_value)))\n\n def parse_retry_after(self, retry_after: str) -> float:\n seconds: float\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(f\"Invalid Retry-After header: {retry_after}\")\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n seconds = max(seconds, 0)\n\n return seconds\n\n def get_retry_after(self, response: BaseHTTPResponse) -> float | None:\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return 
self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response: BaseHTTPResponse) -> bool:\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self) -> None:\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response: BaseHTTPResponse | None = None) -> None:\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err: Exception) -> bool:\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err: Exception) -> bool:\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method: str) -> bool:\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n if self.allowed_methods and method.upper() not in self.allowed_methods:\n return False\n return True\n\n def is_retry(\n self, method: str, status_code: int, has_retry_after: bool = False\n ) -> bool:\n \"\"\"Is this method/status code retryable? 
(Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return bool(\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self) -> bool:\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = [\n x\n for x in (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n if x\n ]\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method: str | None = None,\n url: str | None = None,\n response: BaseHTTPResponse | None = None,\n error: Exception | None = None,\n _pool: ConnectionPool | None = None,\n _stacktrace: TracebackType | None = None,\n ) -> Retry:\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.BaseHTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n elif error and self._is_read_error(error):\n # Read retry?\n if read is False or method is None or not self._is_method_retryable(method):\n raise reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n response_redirect_location = response.get_redirect_location()\n if response_redirect_location:\n redirect_location = response_redirect_location\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n reason = error or ResponseError(cause)\n raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, 
new_retry)\n\n return new_retry\n\n def __repr__(self) -> str:\n return (\n f\"{type(self).__name__}(total={self.total}, connect={self.connect}, \"\n f\"read={self.read}, redirect={self.redirect}, status={self.status})\"\n )" }, { "identifier": "Timeout", "path": "dist/py/Python38/site-packages/urllib3/util/timeout.py", "snippet": "class Timeout:\n \"\"\"Timeout configuration.\n\n Timeouts can be defined as a default for a pool:\n\n .. code-block:: python\n\n import urllib3\n\n timeout = urllib3.util.Timeout(connect=2.0, read=7.0)\n\n http = urllib3.PoolManager(timeout=timeout)\n\n resp = http.request(\"GET\", \"https://example.com/\")\n\n print(resp.status)\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", timeout=Timeout(10))\n\n Timeouts can be disabled by setting all the parameters to ``None``:\n\n .. code-block:: python\n\n no_timeout = Timeout(connect=None, read=None)\n response = http.request(\"GET\", \"https://example.com/\", timeout=no_timeout)\n\n\n :param total:\n This combines the connect and read timeouts into one; the read timeout\n will be set to the time leftover from the connect attempt. In the\n event that both a connect timeout and a total are specified, or a read\n timeout and a total are specified, the shorter timeout will be applied.\n\n Defaults to None.\n\n :type total: int, float, or None\n\n :param connect:\n The maximum amount of time (in seconds) to wait for a connection\n attempt to a server to succeed. Omitting the parameter will default the\n connect timeout to the system default, probably `the global default\n timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout for connection attempts.\n\n :type connect: int, float, or None\n\n :param read:\n The maximum amount of time (in seconds) to wait between consecutive\n read operations for a response from the server. Omitting the parameter\n will default the read timeout to the system default, probably `the\n global default timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout.\n\n :type read: int, float, or None\n\n .. note::\n\n Many factors can affect the total amount of time for urllib3 to return\n an HTTP response.\n\n For example, Python's DNS resolver does not obey the timeout specified\n on the socket. Other factors that can affect total request time include\n high CPU load, high swap, the program running at a low priority level,\n or other behaviors.\n\n In addition, the read and total timeouts only measure the time between\n read operations on the socket connecting the client and the server,\n not the total amount of time for the request to return a complete\n response. For most requests, the timeout is raised because the server\n has not sent the first byte in the specified time. 
This is not always\n the case; if a server streams one byte every fifteen seconds, a timeout\n of 20 seconds will not trigger, even though the request will take\n several minutes to complete.\n\n If your goal is to cut off any request after a set amount of wall clock\n time, consider having a second \"watcher\" thread to cut off a slow\n request.\n \"\"\"\n\n #: A sentinel object representing the default timeout value\n DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT\n\n def __init__(\n self,\n total: _TYPE_TIMEOUT = None,\n connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n ) -> None:\n self._connect = self._validate_timeout(connect, \"connect\")\n self._read = self._validate_timeout(read, \"read\")\n self.total = self._validate_timeout(total, \"total\")\n self._start_connect: float | None = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})\"\n\n # __str__ provided for backwards compatibility\n __str__ = __repr__\n\n @staticmethod\n def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:\n return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout\n\n @classmethod\n def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:\n \"\"\"Check that a timeout attribute is valid.\n\n :param value: The timeout value to validate\n :param name: The name of the timeout attribute to validate. This is\n used to specify in error messages.\n :return: The validated and casted version of the given value.\n :raises ValueError: If it is a numeric value less than or equal to\n zero, or the type is not an integer, float, or None.\n \"\"\"\n if value is None or value is _DEFAULT_TIMEOUT:\n return value\n\n if isinstance(value, bool):\n raise ValueError(\n \"Timeout cannot be a boolean value. It must \"\n \"be an int, float or None.\"\n )\n try:\n float(value)\n except (TypeError, ValueError):\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n try:\n if value <= 0:\n raise ValueError(\n \"Attempted to set %s timeout to %s, but the \"\n \"timeout cannot be set to a value less \"\n \"than or equal to 0.\" % (name, value)\n )\n except TypeError:\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n return value\n\n @classmethod\n def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Create a new Timeout from a legacy timeout value.\n\n The timeout value used by httplib.py sets the same timeout on the\n connect(), and recv() socket requests. 
This creates a :class:`Timeout`\n object that sets the individual timeouts to the ``timeout`` value\n passed to this function.\n\n :param timeout: The legacy timeout value.\n :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None\n :return: Timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n return Timeout(read=timeout, connect=timeout)\n\n def clone(self) -> Timeout:\n \"\"\"Create a copy of the timeout object\n\n Timeout properties are stored per-pool but each request needs a fresh\n Timeout object to ensure each one has its own start/stop configured.\n\n :return: a copy of the timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n # We can't use copy.deepcopy because that will also create a new object\n # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to\n # detect the user default.\n return Timeout(connect=self._connect, read=self._read, total=self.total)\n\n def start_connect(self) -> float:\n \"\"\"Start the timeout clock, used during a connect() attempt\n\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to start a timer that has been started already.\n \"\"\"\n if self._start_connect is not None:\n raise TimeoutStateError(\"Timeout timer has already been started.\")\n self._start_connect = time.monotonic()\n return self._start_connect\n\n def get_connect_duration(self) -> float:\n \"\"\"Gets the time elapsed since the call to :meth:`start_connect`.\n\n :return: Elapsed time in seconds.\n :rtype: float\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to get duration for a timer that hasn't been started.\n \"\"\"\n if self._start_connect is None:\n raise TimeoutStateError(\n \"Can't get connect duration for timer that has not started.\"\n )\n return time.monotonic() - self._start_connect\n\n @property\n def connect_timeout(self) -> _TYPE_TIMEOUT:\n \"\"\"Get the value to use when setting a connection timeout.\n\n This will be a positive float or integer, the value None\n (never timeout), or the default system timeout.\n\n :return: Connect timeout.\n :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None\n \"\"\"\n if self.total is None:\n return self._connect\n\n if self._connect is None or self._connect is _DEFAULT_TIMEOUT:\n return self.total\n\n return min(self._connect, self.total) # type: ignore[type-var]\n\n @property\n def read_timeout(self) -> float | None:\n \"\"\"Get the value for the read timeout.\n\n This assumes some time has elapsed in the connection timeout and\n computes the read timeout appropriately.\n\n If self.total is set, the read timeout is dependent on the amount of\n time taken by the connect timeout. 
If the connection time has not been\n established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be\n raised.\n\n :return: Value to use for the read timeout.\n :rtype: int, float or None\n :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`\n has not yet been called on this object.\n \"\"\"\n if (\n self.total is not None\n and self.total is not _DEFAULT_TIMEOUT\n and self._read is not None\n and self._read is not _DEFAULT_TIMEOUT\n ):\n # In case the connect timeout has not yet been established.\n if self._start_connect is None:\n return self._read\n return max(0, min(self.total - self.get_connect_duration(), self._read))\n elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:\n return max(0, self.total - self.get_connect_duration())\n else:\n return self.resolve_default_timeout(self._read)" }, { "identifier": "Url", "path": "dist/py/Python38/site-packages/urllib3/util/url.py", "snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. 
code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url" }, { "identifier": "parse_url", "path": "dist/py/Python38/site-packages/urllib3/util/url.py", "snippet": "def parse_url(url: str) -> Url:\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urllib.parse`.\n\n Example:\n\n .. code-block:: python\n\n import urllib3\n\n print( urllib3.util.parse_url('http://google.com/mail/'))\n # Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n\n print( urllib3.util.parse_url('google.com:80'))\n # Url(scheme=None, host='google.com', port=80, path=None, ...)\n\n print( urllib3.util.parse_url('/foo?bar'))\n # Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not _SCHEME_RE.search(url):\n url = \"//\" + url\n\n scheme: str | None\n authority: str | None\n auth: str | None\n host: str | None\n port: str | None\n port_int: int | None\n path: str | None\n query: str | None\n fragment: str | None\n\n try:\n scheme, authority, path, query, fragment = _URI_RE.match(url).groups() # type: ignore[union-attr]\n normalize_uri = scheme is None or scheme.lower() in _NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups() # type: ignore[union-attr]\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, _USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port_int = int(port)\n if not (0 <= port_int <= 65535):\n raise LocationParseError(url)\n else:\n port_int = None\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, _PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, _QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, _FRAGMENT_CHARS)\n\n except (ValueError, AttributeError) as e:\n raise LocationParseError(source_url) from e\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if 
not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n return Url(\n scheme=scheme,\n auth=auth,\n host=host,\n port=port_int,\n path=path,\n query=query,\n fragment=fragment,\n )" } ]
import functools
import logging
import typing
import warnings
import ssl
from types import TracebackType
from urllib.parse import urljoin

from ._collections import HTTPHeaderDict, RecentlyUsedContainer
from ._request_methods import RequestMethods
from .connection import ProxyConfig
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
from .exceptions import (
    LocationValueError,
    MaxRetryError,
    ProxySchemeUnknown,
    URLSchemeUnknown,
)
from .response import BaseHTTPResponse
from .util.connection import _TYPE_SOCKET_OPTIONS
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import Url, parse_url
from typing_extensions import Literal
20,132
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None
key__proxy: Url | None
14
2023-10-11 09:08:57+00:00
24k
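The record above and the one that follows share the same field layout. As a minimal sketch of how such a row might be consumed, assuming (this is an inference from the column names, not documented behaviour of the dataset) that `import_statement` plus `cropped_code` form the in-file prompt, `context` holds retrieved cross-file snippets, `next_line` is the completion target, and `gold_snippet_index` points at the context entry containing the ground-truth definition, a loader could look like the following. The file name `records.jsonl` and both helper functions are illustrative only.

```python
# Hypothetical loader sketch. Field semantics are assumptions inferred from the
# column names (context, import_statement, cropped_code, next_line,
# gold_snippet_index); adjust if the dataset documents them differently.
import json
from typing import Any


def build_prompt(record: dict[str, Any]) -> str:
    """Concatenate retrieved cross-file snippets with the in-file context."""
    # Each context entry is assumed to carry 'identifier', 'path' and 'snippet'.
    retrieved = "\n\n".join(
        f"# {c['path']} :: {c['identifier']}\n{c['snippet']}"
        for c in record["context"]
    )
    return f"{retrieved}\n\n{record['import_statement']}\n{record['cropped_code']}"


def is_correct(record: dict[str, Any], prediction: str) -> bool:
    """Exact-match check of a predicted line against the next_line target."""
    return prediction.strip() == record["next_line"].strip()


if __name__ == "__main__":
    # Hypothetical path: rows exported as JSON Lines, one record per line.
    with open("records.jsonl") as fh:
        for raw in fh:
            record = json.loads(raw)
            prompt = build_prompt(record)
            gold = record["context"][record["gold_snippet_index"]]
            print(record["repo_name"], record["file_path"],
                  len(prompt), gold["identifier"])
```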
MTgeophysics/mtpy-v2
mtpy/modeling/modem/convariance.py
[ { "identifier": "CovarianceError", "path": "mtpy/modeling/modem/exception.py", "snippet": "class CovarianceError(ModEMError):\n \"\"\" Raise for Covariance class specific exceptions\"\"\"\n\n pass" }, { "identifier": "Model", "path": "mtpy/modeling/modem/model.py", "snippet": "class Model:\n \"\"\"\n make and read a FE mesh grid\n\n The mesh assumes the coordinate system where:\n x == North\n y == East\n z == + down\n\n All dimensions are in meters.\n\n The mesh is created by first making a regular grid around the station area,\n then padding cells are added that exponentially increase to the given\n extensions. Depth cell increase on a log10 scale to the desired depth,\n then padding cells are added that increase exponentially.\n\n Arguments\n -------------\n **station_object** : mtpy.modeling.modem.Stations object\n .. seealso:: mtpy.modeling.modem.Stations\n\n Examples\n -------------\n\n :Example 1 --> create mesh first then data file: ::\n\n >>> import mtpy.modeling.modem as modem\n >>> import os\n >>> # 1) make a list of all .edi files that will be inverted for\n >>> edi_path = r\"/home/EDI_Files\"\n >>> edi_list = [os.path.join(edi_path, edi)\n for edi in os.listdir(edi_path)\n >>> ... if edi.find('.edi') > 0]\n >>> # 2) Make a Stations object\n >>> stations_obj = modem.Stations()\n >>> stations_obj.get_station_locations_from_edi(edi_list)\n >>> # 3) make a grid from the stations themselves with 200m cell spacing\n >>> mmesh = modem.Model(station_obj)\n >>> # change cell sizes\n >>> mmesh.cell_size_east = 200,\n >>> mmesh.cell_size_north = 200\n >>> mmesh.ns_ext = 300000 # north-south extension\n >>> mmesh.ew_ext = 200000 # east-west extension of model\n >>> mmesh.make_mesh()\n >>> # check to see if the mesh is what you think it should be\n >>> msmesh.plot_mesh()\n >>> # all is good write the mesh file\n >>> msmesh.write_model_file(save_path=r\"/home/modem/Inv1\")\n >>> # create data file\n >>> md = modem.Data(edi_list, station_locations=mmesh.station_locations)\n >>> md.write_data_file(save_path=r\"/home/modem/Inv1\")\n\n :Example 2 --> Rotate Mesh: ::\n\n >>> mmesh.mesh_rotation_angle = 60\n >>> mmesh.make_mesh()\n\n .. note:: ModEM assumes all coordinates are relative to North and East, and\n does not accommodate mesh rotations, therefore, here the rotation\n is of the stations, which essentially does the same thing. You\n will need to rotate you data to align with the 'new' coordinate\n system.\n\n ==================== ======================================================\n Attributes Description\n ==================== ======================================================\n _logger python logging object that put messages in logging\n format defined in logging configure file, see MtPyLog\n more information\n cell_number_ew optional for user to specify the total number of sells\n on the east-west direction. *default* is None\n cell_number_ns optional for user to specify the total number of sells\n on the north-south direction. *default* is None\n cell_size_east mesh block width in east direction\n *default* is 500\n cell_size_north mesh block width in north direction\n *default* is 500\n grid_center center of the mesh grid\n grid_east overall distance of grid nodes in east direction\n grid_north overall distance of grid nodes in north direction\n grid_z overall distance of grid nodes in z direction\n model_fn full path to initial file name\n model_fn_basename default name for the model file name\n n_air_layers number of air layers in the model. 
*default* is 0\n n_layers total number of vertical layers in model\n nodes_east relative distance between nodes in east direction\n nodes_north relative distance between nodes in north direction\n nodes_z relative distance between nodes in east direction\n pad_east number of cells for padding on E and W sides\n *default* is 7\n pad_north number of cells for padding on S and N sides\n *default* is 7\n pad_num number of cells with cell_size with outside of\n station area. *default* is 3\n pad_method method to use to create padding:\n extent1, extent2 - calculate based on ew_ext and\n ns_ext\n stretch - calculate based on pad_stretch factors\n pad_stretch_h multiplicative number for padding in horizontal\n direction.\n pad_stretch_v padding cells N & S will be pad_root_north**(x)\n pad_z number of cells for padding at bottom\n *default* is 4\n ew_ext E-W extension of model in meters\n ns_ext N-S extension of model in meters\n res_scale scaling method of res, supports\n 'loge' - for log e format\n 'log' or 'log10' - for log with base 10\n 'linear' - linear scale\n *default* is 'loge'\n res_list list of resistivity values for starting model\n res_model starting resistivity model\n res_initial_value resistivity initial value for the resistivity model\n *default* is 100\n mesh_rotation_angle Angle to rotate the grid to. Angle is measured\n positve clockwise assuming North is 0 and east is 90.\n *default* is None\n save_path path to save file to\n sea_level sea level in grid_z coordinates. *default* is 0\n station_locations location of stations\n title title in initial file\n z1_layer first layer thickness\n z_bottom absolute bottom of the model *default* is 300,000\n z_target_depth Depth of deepest target, *default* is 50,000\n ==================== ======================================================\n\n\n \"\"\"\n\n def __init__(self, station_locations=None, center_point=None, **kwargs):\n self._logger = logger\n\n self.station_locations = None\n self.center_point = MTLocation()\n\n if station_locations is not None:\n self.station_locations = station_locations\n\n if center_point is not None:\n self.center_point = center_point\n self.model_epsg = self.center_point.utm_epsg\n\n # size of cells within station area in meters\n self.cell_size_east = 500\n self.cell_size_north = 500\n\n # FZ: added this for user input number of cells in the horizontal mesh\n self.cell_number_ew = None\n self.cell_number_ns = None\n\n # padding cells on either side\n self.pad_east = 7\n self.pad_north = 7\n self.pad_z = 4\n\n self.pad_num = 3\n\n self.ew_ext = 100000\n self.ns_ext = 100000\n\n # root of padding cells\n self.pad_stretch_h = 1.2\n self.pad_stretch_v = 1.2\n\n self.z1_layer = 10\n self.z_layer_rounding = 0\n self.z_target_depth = 50000\n self.z_bottom = 300000\n\n # number of vertical layers\n self.n_layers = 30\n\n # number of air layers\n self.n_air_layers = 0\n # sea level in grid_z coordinates. 
Auto adjusts when topography read in?\n self.sea_level = 0.0\n\n # strike angle to rotate grid to\n self.mesh_rotation_angle = 0\n\n # --> attributes to be calculated\n # grid nodes\n self._nodes_east = None\n self._nodes_north = None\n self._nodes_z = None\n\n # grid locations\n self.grid_east = None\n self.grid_north = None\n self.grid_z = kwargs.pop(\"grid_z\", None)\n if self.grid_z is not None:\n self.n_layers = len(self.grid_z)\n self.z_mesh_method = \"custom\"\n else:\n self.z_mesh_method = \"new\"\n if \"z_mesh_method\" in list(kwargs.keys()):\n self.z_mesh_method = kwargs[\"z_mesh_method\"]\n\n # method to use to create padding\n self.pad_method = \"extent1\"\n\n self.grid_center = None\n self.surface_dict = {}\n\n # resistivity model\n self.res_initial_value = 100.0\n self.res_model = None\n\n # initial file stuff\n self.save_path = Path().cwd()\n self.model_fn_basename = \"ModEM_Model_File.rho\"\n\n self.title = \"Model File written by MTpy.modeling.modem\"\n self.res_scale = \"loge\"\n\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)\n else:\n self._logger.warning(\n f\"Argument {key}={value} is not supportted thus not been set.\"\n )\n\n def __str__(self):\n lines = [\"ModEM Model Object:\", \"-\" * 20]\n # --> print out useful information\n try:\n lines.append(\n f\"\\tNumber of stations = {len(self.station_locations.station)}\"\n )\n except AttributeError:\n lines.append(\"\\tNumber of stations = unknown\")\n\n lines.append(\"\\tMesh Parameter: \")\n lines.append(f\"\\t\\tcell_size_east: {self.cell_size_east}\")\n lines.append(f\"\\t\\tcell_size_north: {self.cell_size_north}\")\n lines.append(f\"\\t\\tpad_east: {self.pad_east}\")\n lines.append(f\"\\t\\tpad_north: {self.pad_north}\")\n lines.append(f\"\\t\\tpad_num: {self.pad_num}\")\n lines.append(f\"\\t\\tz1_layer: {self.z1_layer}\")\n lines.append(f\"\\t\\tz_target_depth: {self.z_target_depth}\")\n lines.append(f\"\\t\\tn_layers: {self.n_layers}\")\n lines.append(f\"\\t\\tres_initial_value: {self.res_initial_value}\")\n lines.append(\"\\tDimensions: \")\n lines.append(f\"\\t\\te-w: {self.grid_east.size}\")\n lines.append(f\"\\t\\tn-s: {self.grid_north.size}\")\n lines.append(f\"\\t\\tz: {self.grid_z.size} (without 7 air layers)\")\n lines.append(\"\\tExtensions: \")\n lines.append(f\"\\t\\te-w: {self.nodes_east.__abs__().sum():.1f} (m)\")\n lines.append(f\"\\t\\tn-s: {self.nodes_north.__abs__().sum():.1f} (m)\")\n lines.append(f\"\\t\\t0-z: {self.nodes_z.__abs__().sum():.1f} (m)\")\n if self.mesh_rotation_angle != 0:\n lines.append(\n f\"\\tStations rotated by: {self.mesh_rotation_angle:.1f} deg clockwise positive from N\"\n )\n\n lines.append(\n \" ** Note ModEM does not accommodate mesh rotations, it assumes\"\n )\n lines.append(\" all coordinates are aligned to geographic N, E\")\n lines.append(\n \" therefore rotating the stations will have a similar effect\"\n )\n lines.append(\" as rotating the mesh.\")\n lines.append(\"-\" * 20)\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()\n\n @property\n def save_path(self):\n return self._save_path\n\n @save_path.setter\n def save_path(self, save_path):\n if save_path is None:\n self._save_path = Path().cwd()\n else:\n self._save_path = Path(save_path)\n\n if not self._save_path.exists():\n self._save_path.mkdir()\n\n @property\n def model_fn(self):\n return self.save_path.joinpath(self.model_fn_basename)\n\n @model_fn.setter\n def model_fn(self, filename):\n if filename is not None:\n filename = 
Path(filename)\n self.save_path = filename.parent\n self.model_fn_basename = filename.name\n\n @property\n def model_epsg(self):\n return self.center_point.utm_epsg\n\n @model_epsg.setter\n def model_epsg(self, value):\n self.center_point.utm_epsg = value\n\n # --> make nodes and grid symbiotic so if you set one the other one\n # gets set as well\n # Nodes East\n @property\n def nodes_east(self):\n if self.grid_east is not None:\n self._nodes_east = np.array(\n [\n abs(self.grid_east[ii + 1] - self.grid_east[ii])\n for ii in range(self.grid_east.size - 1)\n ]\n )\n return self._nodes_east\n\n @nodes_east.setter\n def nodes_east(self, nodes):\n nodes = np.array(nodes)\n self._nodes_east = nodes\n self.grid_east = np.array(\n [\n nodes[0:ii].sum() for ii in range(nodes.size + 1)\n ] # -nodes.sum() / 2 +\n ) # + [shift])#[nodes.sum() / 2]\n\n # Nodes North\n @property\n def nodes_north(self):\n if self.grid_north is not None:\n self._nodes_north = np.array(\n [\n abs(self.grid_north[ii + 1] - self.grid_north[ii])\n for ii in range(self.grid_north.size - 1)\n ]\n )\n return self._nodes_north\n\n @nodes_north.setter\n def nodes_north(self, nodes):\n nodes = np.array(nodes)\n self._nodes_north = nodes\n self.grid_north = np.array(\n [\n nodes[0:ii].sum() for ii in range(nodes.size + 1)\n ] # -nodes.sum() / 2 +\n ) # + [shift])#[nodes.sum() / 2]\n\n @property\n def nodes_z(self):\n if self.grid_z is not None:\n self._nodes_z = np.array(\n [\n abs(self.grid_z[ii + 1] - self.grid_z[ii])\n for ii in range(self.grid_z.size - 1)\n ]\n )\n\n return self._nodes_z\n\n @nodes_z.setter\n def nodes_z(self, nodes):\n nodes = np.array(nodes)\n self._nodes_z = nodes\n self.grid_z = np.array(\n [nodes[0:ii].sum() for ii in range(nodes.size)] + [nodes.sum()]\n )\n\n # need some arrays for plotting that are the same length as the\n # resistivity model\n @property\n def plot_east(self):\n plot_east = np.array(\n [self.nodes_east[0:ii].sum() for ii in range(self.nodes_east.size)]\n )\n return plot_east - plot_east[-1] / 2.0\n\n @property\n def plot_north(self):\n plot_north = np.array(\n [\n self.nodes_north[0:ii].sum()\n for ii in range(self.nodes_north.size)\n ]\n )\n return plot_north - plot_north[-1] / 2.0\n\n @property\n def plot_z(self):\n return np.array(\n [self.nodes_z[0:ii].sum() for ii in range(self.nodes_z.size)]\n )\n\n def make_mesh(self, verbose=True):\n \"\"\"\n create finite element mesh according to user-input parameters.\n\n The mesh is built by:\n 1. Making a regular grid within the station area.\n 2. Adding pad_num of cell_width cells outside of station area\n 3. Adding padding cells to given extension and number of padding\n cells.\n 4. Making vertical cells starting with z1_layer increasing\n logarithmically (base 10) to z_target_depth and num_layers.\n 5. Add vertical padding cells to desired extension.\n 6. 
Check to make sure none of the stations lie on a node.\n If they do then move the node by .02*cell_width\n\n \"\"\"\n\n # --> find the edges of the grid\n # calculate the extra width of padding cells\n # multiply by 1.5 because this is only for 1 side\n pad_width_east = self.pad_num * 1.5 * self.cell_size_east\n pad_width_north = self.pad_num * 1.5 * self.cell_size_north\n\n # get the extremities\n west = self.station_locations.model_east.min() - pad_width_east\n east = self.station_locations.model_east.max() + pad_width_east\n south = self.station_locations.model_north.min() - pad_width_north\n north = self.station_locations.model_north.max() + pad_width_north\n\n # round the numbers so they are easier to read\n west = np.round(west, -2)\n east = np.round(east, -2)\n south = np.round(south, -2)\n north = np.round(north, -2)\n\n # -------make a grid around the stations from the parameters above------\n\n # adjust the edges so we have a whole number of cells\n add_ew = ((east - west) % self.cell_size_east) / 2.0\n add_ns = ((north - south) % self.cell_size_north) / 2.0\n\n # --> make the inner grid first\n inner_east = np.arange(\n west + add_ew - self.cell_size_east,\n east - add_ew + 2 * self.cell_size_east,\n self.cell_size_east,\n )\n inner_north = np.arange(\n south + add_ns + self.cell_size_north,\n north - add_ns + 2 * self.cell_size_north,\n self.cell_size_north,\n )\n\n # compute padding cells\n # first validate ew_ext and ns_ext to ensure it is large enough\n if \"extent\" in self.pad_method:\n self._validate_extent(\n inner_east.min(),\n inner_east.max(),\n inner_north.min(),\n inner_north.max(),\n )\n\n if self.pad_method == \"extent1\":\n padding_east = mtmesh.get_padding_cells(\n self.cell_size_east,\n self.ew_ext / 2 - east,\n self.pad_east,\n self.pad_stretch_h,\n )\n padding_north = mtmesh.get_padding_cells(\n self.cell_size_north,\n self.ns_ext / 2 - north,\n self.pad_north,\n self.pad_stretch_h,\n )\n elif self.pad_method == \"extent2\":\n padding_east = mtmesh.get_padding_cells2(\n self.cell_size_east,\n inner_east[-1],\n self.ew_ext / 2.0,\n self.pad_east,\n )\n padding_north = mtmesh.get_padding_cells2(\n self.cell_size_north,\n inner_north[-1],\n self.ns_ext / 2.0,\n self.pad_north,\n )\n elif self.pad_method == \"stretch\":\n padding_east = mtmesh.get_padding_from_stretch(\n self.cell_size_east, self.pad_stretch_h, self.pad_east\n )\n padding_north = mtmesh.get_padding_from_stretch(\n self.cell_size_north, self.pad_stretch_h, self.pad_north\n )\n else:\n raise NameError(\n 'Padding method \"{}\" is not supported'.format(self.pad_method)\n )\n\n # make the horizontal grid\n self.grid_east = np.append(\n np.append(-1 * padding_east[::-1] + inner_east.min(), inner_east),\n padding_east + inner_east.max(),\n )\n self.grid_north = np.append(\n np.append(\n -1 * padding_north[::-1] + inner_north.min(), inner_north\n ),\n padding_north + inner_north.max(),\n )\n\n # --> need to make sure none of the stations lie on the nodes\n for s_east in sorted(self.station_locations.model_east):\n try:\n node_index = np.where(\n abs(s_east - self.grid_east) < 0.02 * self.cell_size_east\n )[0][0]\n if s_east - self.grid_east[node_index] > 0:\n self.grid_east[node_index] -= 0.02 * self.cell_size_east\n elif s_east - self.grid_east[node_index] < 0:\n self.grid_east[node_index] += 0.02 * self.cell_size_east\n except IndexError:\n continue\n\n # --> need to make sure none of the stations lie on the nodes\n for s_north in sorted(self.station_locations.model_north):\n try:\n node_index = 
np.where(\n abs(s_north - self.grid_north)\n < 0.02 * self.cell_size_north\n )[0][0]\n if s_north - self.grid_north[node_index] > 0:\n self.grid_north[node_index] -= 0.02 * self.cell_size_north\n elif s_north - self.grid_north[node_index] < 0:\n self.grid_north[node_index] += 0.02 * self.cell_size_north\n except IndexError:\n continue\n\n if self.z_mesh_method == \"custom\":\n if self.grid_z is None:\n self.z_mesh_method = \"new\"\n self._logger.warn(\n \"No grid_z provided, creating new z mesh using default method\"\n )\n\n if self.z_mesh_method == \"custom\":\n self.nodes_z, z_grid = (\n self.grid_z[1:] - self.grid_z[:-1],\n self.grid_z,\n )\n elif self.z_mesh_method == \"new\":\n self.nodes_z, z_grid = self.make_z_mesh()\n else:\n raise NameError(\n 'Z mesh method \"{}\" is not supported'.format(\n self.z_mesh_method\n )\n )\n\n # compute grid center\n center_east = np.round(\n self.grid_east.min() - self.grid_east.mean(), -1\n )\n center_north = np.round(\n self.grid_north.min() - self.grid_north.mean(), -1\n )\n center_z = 0\n\n # this is the value to the lower left corner from the center.\n self.grid_center = np.array([center_north, center_east, center_z])\n\n # make the resistivity array\n self.res_model = np.zeros(\n (self.nodes_north.size, self.nodes_east.size, self.nodes_z.size)\n )\n self.res_model[:, :, :] = self.res_initial_value\n\n # --> print out useful information\n if verbose:\n print(self.__str__())\n\n def make_z_mesh(self, n_layers=None):\n \"\"\"\n new version of make_z_mesh. make_z_mesh and M\n \"\"\"\n n_layers = self.n_layers if n_layers is None else n_layers\n\n # --> make depth grid\n # if n_airlayers < 0; set to 0\n log_z = mtcc.make_log_increasing_array(\n self.z1_layer, self.z_target_depth, n_layers - self.pad_z\n )\n\n if self.z_layer_rounding is not None:\n z_nodes = np.around(log_z, decimals=self.z_layer_rounding)\n else:\n # round any values less than 100 to the same s.f. as z1_layer\n z_nodes = np.around(\n log_z[log_z < 100],\n decimals=-int(np.floor(np.log10(self.z1_layer))),\n )\n # round any values greater than or equal to 100 to the nearest 100\n z_nodes = np.append(\n z_nodes, np.around(log_z[log_z >= 100], decimals=-2)\n )\n\n # index of top of padding\n # itp = len(z_nodes) - 1\n\n # padding cells in the vertical direction\n z_0 = float(z_nodes[-1])\n for ii in range(1, self.pad_z + 1):\n pad_d = np.round(z_0 * self.pad_stretch_v**ii, -2)\n z_nodes = np.append(z_nodes, pad_d)\n # add air layers and define ground surface level.\n # initial layer thickness is same as z1_layer\n # z_nodes = np.hstack([[z1_layer] * n_air, z_nodes])\n\n # make an array of absolute values\n z_grid = np.array(\n [z_nodes[:ii].sum() for ii in range(z_nodes.shape[0] + 1)]\n )\n\n return z_nodes, z_grid\n\n def add_layers_to_mesh(\n self, n_add_layers=None, layer_thickness=None, where=\"top\"\n ):\n \"\"\"\n Function to add constant thickness layers to the top or bottom of mesh.\n Note: It is assumed these layers are added before the topography. If\n you want to add topography layers, use function add_topography_to_model\n\n :param n_add_layers: integer, number of layers to add\n :param layer_thickness: real value or list/array. Thickness of layers,\n defaults to z1 layer. 
Can provide a single value\n or a list/array containing multiple layer\n thicknesses.\n :param where: where to add, top or bottom\n\n\n \"\"\"\n # create array containing layers to add\n if layer_thickness is None:\n layer_thickness = self.z1_layer\n if np.iterable(layer_thickness):\n add_layers = np.insert(np.cumsum(layer_thickness), 0, 0)[:-1]\n layer_thickness = layer_thickness[-1]\n\n if n_add_layers != len(add_layers):\n self._logger.warn(\n \"Updating number of layers to reflect the length of the layer thickness array\"\n )\n n_add_layers = len(add_layers)\n else:\n add_layers = np.arange(\n 0, n_add_layers * layer_thickness, layer_thickness\n )\n\n # create a new z grid\n self.grid_z = np.hstack(\n [add_layers, self.grid_z + add_layers[-1] + layer_thickness]\n )\n\n # update the number of layers\n self.n_layers = len(self.grid_z) - 1\n\n # add the extra layer to the res model\n self.res_model = np.vstack(\n [self.res_model[:, :, :n_add_layers].T, self.res_model.T]\n ).T\n\n def assign_resistivity_from_surface_data(\n self, top_surface, bottom_surface, resistivity_value\n ):\n \"\"\"\n assign resistivity value to all points above or below a surface\n requires the surface_dict attribute to exist and contain data for\n surface key (can get this information from ascii file using\n project_surface)\n\n **inputs**\n surface_name = name of surface (must correspond to key in surface_dict)\n resistivity_value = value to assign\n where = 'above' or 'below' - assign resistivity above or below the\n surface\n \"\"\"\n\n # FZ: should ref-define the self.res_model if its shape has changed after topo air layer are added\n\n gcz = np.mean([self.grid_z[:-1], self.grid_z[1:]], axis=0)\n\n self._logger.debug(\n \"gcz is the cells centre coordinates: %s, %s\" % (len(gcz), gcz)\n )\n\n # assign resistivity value\n for j in range(len(self.res_model)):\n for i in range(len(self.res_model[j])):\n ii = np.where(\n (gcz > top_surface[j, i]) & (gcz <= bottom_surface[j, i])\n )[0]\n self.res_model[j, i, ii] = resistivity_value\n\n def write_model_file(self, **kwargs):\n \"\"\"\n will write an initial file for ModEM.\n\n Note that x is assumed to be S --> N, y is assumed to be W --> E and\n z is positive downwards. This means that index [0, 0, 0] is the\n southwest corner of the first layer. Therefore if you build a model\n by hand the layer block will look as it should in map view.\n\n Also, the xgrid, ygrid and zgrid are assumed to be the relative\n distance between neighboring nodes. 
This is needed because wsinv3d\n builds the model from the bottom SW corner assuming the cell width\n from the init file.\n\n Key Word Arguments:\n ----------------------\n\n **nodes_north** : np.array(nx)\n block dimensions (m) in the N-S direction.\n **Note** that the code reads the grid assuming that\n index=0 is the southern most point.\n\n **nodes_east** : np.array(ny)\n block dimensions (m) in the E-W direction.\n **Note** that the code reads in the grid assuming that\n index=0 is the western most point.\n\n **nodes_z** : np.array(nz)\n block dimensions (m) in the vertical direction.\n This is positive downwards.\n\n **save_path** : string\n Path to where the initial file will be saved\n to save_path/model_fn_basename\n\n **model_fn_basename** : string\n basename to save file to\n *default* is ModEM_Model.ws\n file is saved at save_path/model_fn_basename\n\n **title** : string\n Title that goes into the first line\n *default* is Model File written by MTpy.modeling.modem\n\n **res_model** : np.array((nx,ny,nz))\n Prior resistivity model.\n\n .. note:: again that the modeling code\n assumes that the first row it reads in is the southern\n most row and the first column it reads in is the\n western most column. Similarly, the first plane it\n reads in is the Earth's surface.\n\n **res_starting_value** : float\n starting model resistivity value,\n assumes a half space in Ohm-m\n *default* is 100 Ohm-m\n\n **res_scale** : [ 'loge' | 'log' | 'log10' | 'linear' ]\n scale of resistivity. In the ModEM code it\n converts everything to Loge,\n *default* is 'loge'\n\n \"\"\"\n for key in list(kwargs.keys()):\n setattr(self, key, kwargs[key])\n\n # get resistivity model\n if self.res_model is None:\n self.res_model = np.zeros(\n (\n self.nodes_north.size,\n self.nodes_east.size,\n self.nodes_z.size,\n )\n )\n self.res_model[:, :, :] = self.res_initial_value\n\n elif type(self.res_model) in [float, int]:\n self.res_initial_value = self.res_model\n self.res_model = np.zeros(\n (\n self.nodes_north.size,\n self.nodes_east.size,\n self.nodes_z.size,\n )\n )\n self.res_model[:, :, :] = self.res_initial_value\n\n # --> write file\n with open(self.model_fn, \"w\") as ifid:\n ifid.write(\"# {0}\\n\".format(self.title.upper()))\n ifid.write(\n \"{0:>5}{1:>5}{2:>5}{3:>5} {4}\\n\".format(\n self.nodes_north.size,\n self.nodes_east.size,\n self.nodes_z.size,\n 0,\n self.res_scale.upper(),\n )\n )\n\n # write S --> N node block\n for ii, nnode in enumerate(self.nodes_north):\n ifid.write(\"{0:>12.3f}\".format(abs(nnode)))\n\n ifid.write(\"\\n\")\n\n # write W --> E node block\n for jj, enode in enumerate(self.nodes_east):\n ifid.write(\"{0:>12.3f}\".format(abs(enode)))\n ifid.write(\"\\n\")\n\n # write top --> bottom node block\n for kk, zz in enumerate(self.nodes_z):\n ifid.write(\"{0:>12.3f}\".format(abs(zz)))\n ifid.write(\"\\n\")\n\n # write the resistivity in log e format\n if self.res_scale.lower() == \"loge\":\n write_res_model = np.log(self.res_model[::-1, :, :])\n elif (\n self.res_scale.lower() == \"log\"\n or self.res_scale.lower() == \"log10\"\n ):\n write_res_model = np.log10(self.res_model[::-1, :, :])\n elif self.res_scale.lower() == \"linear\":\n write_res_model = self.res_model[::-1, :, :]\n else:\n raise ModelError(\n 'resistivity scale \"{}\" is not supported.'.format(\n self.res_scale\n )\n )\n\n # write out the layers from resmodel\n for zz in range(self.nodes_z.size):\n ifid.write(\"\\n\")\n for ee in range(self.nodes_east.size):\n for nn in range(self.nodes_north.size):\n ifid.write(\n 
\"{0:>13.5E}\".format(write_res_model[nn, ee, zz])\n )\n ifid.write(\"\\n\")\n\n if self.grid_center is None:\n # compute grid center\n center_east = -self.nodes_east.__abs__().sum() / 2\n center_north = -self.nodes_north.__abs__().sum() / 2\n center_z = 0\n self.grid_center = np.array(\n [center_north, center_east, center_z]\n )\n\n ifid.write(\n \"\\n{0:>16.3f}{1:>16.3f}{2:>16.3f}\\n\".format(\n self.grid_center[0],\n self.grid_center[1],\n self.grid_center[2],\n )\n )\n\n if self.mesh_rotation_angle is None:\n ifid.write(\"{0:>9.3f}\\n\".format(0))\n else:\n ifid.write(\"{0:>9.3f}\\n\".format(self.mesh_rotation_angle))\n\n # not needed ifid.close()\n\n self._logger.info(\"Wrote file to: {0}\".format(self.model_fn))\n\n def read_model_file(self, model_fn=None):\n \"\"\"\n read an initial file and return the pertinent information including\n grid positions in coordinates relative to the center point (0,0) and\n starting model.\n\n Note that the way the model file is output, it seems is that the\n blocks are setup as\n\n ModEM: WS:\n ---------- -----\n 0-----> N_north 0-------->N_east\n | |\n | |\n V V\n N_east N_north\n\n\n Arguments:\n ----------\n\n **model_fn** : full path to initializing file.\n\n Outputs:\n --------\n\n **nodes_north** : np.array(nx)\n array of nodes in S --> N direction\n\n **nodes_east** : np.array(ny)\n array of nodes in the W --> E direction\n\n **nodes_z** : np.array(nz)\n array of nodes in vertical direction positive downwards\n\n **res_model** : dictionary\n dictionary of the starting model with keys as layers\n\n **res_list** : list\n list of resistivity values in the model\n\n **title** : string\n title string\n\n \"\"\"\n\n if model_fn is not None:\n self.model_fn = model_fn\n\n if self.model_fn is None:\n raise ModelError(\"model_fn is None, input a model file name\")\n\n if not self.model_fn.exists():\n raise ModelError(f\"Cannot find {self.model_fn}, check path\")\n\n with open(self.model_fn, \"r\") as ifid:\n ilines = ifid.readlines()\n\n self.title = ilines[0].strip()\n\n # get size of dimensions, remembering that x is N-S, y is E-W, z is + down\n nsize = ilines[1].strip().split()\n n_north = int(nsize[0])\n n_east = int(nsize[1])\n n_z = int(nsize[2])\n log_yn = nsize[4]\n\n # get nodes\n self.nodes_north = np.array(\n [float(nn) for nn in ilines[2].strip().split()]\n )\n self.nodes_east = np.array(\n [float(nn) for nn in ilines[3].strip().split()]\n )\n self.nodes_z = np.array(\n [float(nn) for nn in ilines[4].strip().split()]\n )\n\n self.res_model = np.zeros((n_north, n_east, n_z))\n\n # get model\n count_z = 0\n line_index = 6\n count_e = 0\n while count_z < n_z:\n iline = ilines[line_index].strip().split()\n # blank lines spit the depth blocks, use those as a marker to\n # set the layer number and start a new block\n if len(iline) == 0:\n count_z += 1\n count_e = 0\n line_index += 1\n # 3D grid model files don't have a space at the end\n # additional condition to account for this.\n elif (len(iline) == 3) & (count_z == n_z - 1):\n count_z += 1\n count_e = 0\n line_index += 1\n # each line in the block is a line of N-->S values for an east value\n else:\n north_line = np.array([float(nres) for nres in iline])\n\n # Need to be sure that the resistivity array matches\n # with the grids, such that the first index is the\n # furthest south\n self.res_model[:, count_e, count_z] = north_line[::-1]\n\n count_e += 1\n line_index += 1\n\n # --> get grid center and rotation angle\n if len(ilines) > line_index:\n for iline in ilines[line_index:]:\n ilist = 
iline.strip().split()\n # grid center\n if len(ilist) == 3:\n self.grid_center = np.array(ilist, dtype=float)\n # rotation angle\n elif len(ilist) == 1:\n self.mesh_rotation_angle = float(ilist[0])\n else:\n pass\n\n # --> make sure the resistivity units are in linear Ohm-m\n if log_yn.lower() == \"loge\":\n self.res_model = np.e**self.res_model\n elif log_yn.lower() == \"log\" or log_yn.lower() == \"log10\":\n self.res_model = 10**self.res_model\n\n # center the grids\n if self.grid_center is None:\n self.grid_center = np.array(\n [-self.nodes_north.sum() / 2, -self.nodes_east.sum() / 2, 0.0]\n )\n\n # need to shift the grid if the center is not symmetric\n # use the grid centre from the model file\n shift_north = self.grid_center[0] # + self.nodes_north.sum() / 2\n shift_east = self.grid_center[1] # + self.nodes_east.sum() / 2\n shift_z = self.grid_center[2]\n\n # shift the grid. if shift is + then that means the center is\n self.grid_north += shift_north\n self.grid_east += shift_east\n self.grid_z += shift_z\n\n # get cell size\n self.cell_size_east = stats.mode(self.nodes_east)[0][0]\n self.cell_size_north = stats.mode(self.nodes_north)[0][0]\n\n # get number of padding cells\n self.pad_east = np.where(\n self.nodes_east[0 : int(self.nodes_east.size / 2)]\n != self.cell_size_east\n )[0].size\n self.pad_north = np.where(\n self.nodes_north[0 : int(self.nodes_north.size / 2)]\n != self.cell_size_north\n )[0].size\n\n def plot_mesh(self, **kwargs):\n \"\"\"\n Plot model mesh\n\n :param plot_topography: DESCRIPTION, defaults to False\n :type plot_topography: TYPE, optional\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if \"topography\" in self.surface_dict.keys():\n kwargs[\"plot_topography\"] = True\n return PlotMesh(self, **kwargs)\n\n @property\n def model_parameters(self):\n \"\"\"\n get important model parameters to write to a file for documentation\n later.\n\n\n \"\"\"\n\n parameter_list = [\n \"cell_size_east\",\n \"cell_size_north\",\n \"ew_ext\",\n \"ns_ext\",\n \"pad_east\",\n \"pad_north\",\n \"pad_z\",\n \"pad_num\",\n \"z1_layer\",\n \"z_target_depth\",\n \"z_bottom\",\n \"mesh_rotation_angle\",\n \"res_initial_value\",\n \"save_path\",\n ]\n\n parameter_dict = {}\n for parameter in parameter_list:\n key = \"model.{0}\".format(parameter)\n parameter_dict[key] = getattr(self, parameter)\n\n parameter_dict[\"model.size\"] = self.res_model.shape\n\n return parameter_dict\n\n def write_gocad_sgrid_file(\n self, fn=None, origin=[0, 0, 0], clip=0, no_data_value=-99999\n ):\n \"\"\"\n write a model to gocad sgrid\n\n optional inputs:\n\n fn = filename to save to. 
File extension ('.sg') will be appended.\n default is the model name with extension removed\n origin = real world [x,y,z] location of zero point in model grid\n clip = how much padding to clip off the edge of the model for export,\n provide one integer value or list of 3 integers for x,y,z directions\n no_data_value = no data value to put in sgrid\n\n \"\"\"\n if not np.iterable(clip):\n clip = [clip, clip, clip]\n\n # determine save path\n if fn is not None:\n fn = Path(fn)\n # if fn is a full path, convert to a file name\n fndir = fn.parent\n if fndir.is_dir():\n sg_basename = fn.name\n else:\n sg_basename = fn\n else:\n # create a basename if fn is None\n sg_basename = self.model_fn.stem\n\n self.save_path, fn, sg_basename = mtfh.validate_save_file(\n save_path=self.save_path, savefile=fn, basename=sg_basename\n )\n\n # number of cells in the ModEM model\n nyin, nxin, nzin = np.array(self.res_model.shape) + 1\n\n gx, gy = mtmesh.rotate_mesh(\n self.grid_east[clip[0] : nxin - clip[0]],\n self.grid_north[clip[1] : nyin - clip[1]],\n origin[:2],\n self.mesh_rotation_angle,\n )\n\n gz = -1.0 * self.grid_z[: nzin - clip[2]] - origin[2]\n\n gxm, gzm = np.meshgrid(gx, gz)\n gym, gzm = np.meshgrid(gy, gz)\n\n gxm = gxm.reshape(len(gz), len(gy), len(gx[0])).transpose(1, 2, 0)\n gym = gym.reshape(len(gz), len(gy), len(gx[0])).transpose(1, 2, 0)\n gzm = gzm.reshape(len(gz), len(gy), len(gx[0])).transpose(1, 2, 0)\n\n gridedges = (gxm, gym, gzm)\n\n # resistivity values, clipped to one smaller than grid edges\n resvals = self.res_model[\n clip[1] : nyin - clip[1] - 1,\n clip[0] : nxin - clip[0] - 1,\n : nzin - clip[2] - 1,\n ]\n\n sg_obj = mtgocad.Sgrid(\n resistivity=resvals,\n grid_xyz=gridedges,\n fn=sg_basename,\n workdir=self.save_path,\n )\n sg_obj.write_sgrid_file()\n\n def read_gocad_sgrid_file(\n self,\n sgrid_header_file,\n air_resistivity=1e39,\n sea_resistivity=0.3,\n sgrid_positive_up=True,\n ):\n \"\"\"\n read a gocad sgrid file and put this info into a ModEM file.\n Note: can only deal with grids oriented N-S or E-W at this stage,\n with orthogonal coordinates\n\n \"\"\"\n # read sgrid file\n sg_obj = mtgocad.Sgrid()\n sg_obj.read_sgrid_file(sgrid_header_file)\n self.sg_obj = sg_obj\n\n # get resistivity model values\n self.res_model = sg_obj.resistivity\n\n # get nodes and grid locations\n grideast, gridnorth, gridz = [\n np.unique(sg_obj.grid_xyz[i]) for i in range(3)\n ]\n # check if sgrid is positive up and convert to positive down if it is\n # (ModEM grid is positive down)\n if sgrid_positive_up:\n gridz = -gridz\n\n gridz.sort()\n\n if np.all(\n np.array([len(gridnorth), len(grideast), len(gridz)]) - 1\n == np.array(self.res_model.shape)\n ):\n self.grid_east, self.grid_north, self.grid_z = (\n grideast,\n gridnorth,\n gridz,\n )\n else:\n print(\n \"Cannot read sgrid, can't deal with non-orthogonal grids or grids not aligned N-S or E-W\"\n )\n return\n\n # check if we have a data object and if we do, is there a centre position\n # if not then assume it is the centre of the grid\n calculate_centre = True\n if self.data_obj is not None:\n if hasattr(self.data_obj, \"center_point\"):\n if self.data_obj.center_point is not None:\n centre = np.zeros(3)\n centre[0] = self.data_obj.center_point[\"east\"]\n centre[1] = self.data_obj.center_point[\"north\"]\n calculate_centre = False\n # get relative grid locations\n if calculate_centre:\n print(\"Calculating center position\")\n centre = np.zeros(3)\n centre[0] = (self.grid_east.max() + self.grid_east.min()) / 2.0\n centre[1] = 
(self.grid_north.max() + self.grid_north.min()) / 2.0\n centre[2] = self.grid_z[0]\n\n self.grid_east -= centre[0]\n self.grid_north -= centre[1]\n\n self.grid_center = np.array(\n [self.grid_north[0], self.grid_east[0], self.grid_z[0]]\n )\n\n self.z1_layer = self.nodes_z[0]\n # self.z_target_depth = None\n self.z_bottom = self.nodes_z[-1]\n\n # number of vertical layers\n self.n_layers = len(self.grid_z) - 1\n\n # number of air layers\n self.n_airlayers = sum(\n np.amax(self.res_model, axis=(0, 1)) > 0.9 * air_resistivity\n )\n\n # sea level in grid_z coordinates, calculate and adjust centre\n self.sea_level = self.grid_z[self.n_airlayers]\n\n def interpolate_elevation(\n self,\n surface_file=None,\n surface=None,\n get_surface_name=False,\n method=\"nearest\",\n fast=True,\n shift_north=0,\n shift_east=0,\n ):\n \"\"\"\n project a surface to the model grid and add resulting elevation data\n to a dictionary called surface_dict. Assumes the surface is in lat/long\n coordinates (wgs84)\n\n **returns**\n nothing returned, but surface data are added to surface_dict under\n the key given by surface_name.\n\n **inputs**\n choose to provide either surface_file (path to file) or surface (tuple).\n If both are provided then surface tuple takes priority.\n\n surface elevations are positive up, and relative to sea level.\n surface file format is:\n\n ncols 3601\n nrows 3601\n xllcorner -119.00013888889 (longitude of lower left)\n yllcorner 36.999861111111 (latitude of lower left)\n cellsize 0.00027777777777778\n NODATA_value -9999\n elevation data W --> E\n N\n |\n V\n S\n\n Alternatively, provide a tuple with:\n (lon,lat,elevation)\n where elevation is a 2D array (shape (ny,nx)) containing elevation\n points (order S -> N, W -> E)\n and lon, lat are either 1D arrays containing list of longitudes and\n latitudes (in the case of a regular grid) or 2D arrays with same shape\n as elevation array containing longitude and latitude of each point.\n\n other inputs:\n surface_epsg = epsg number of input surface, default is 4326 for lat/lon(wgs84)\n method = interpolation method. 
Default is 'nearest', if model grid is\n dense compared to surface points then choose 'linear' or 'cubic'\n\n \"\"\"\n # initialise a dictionary to contain the surfaces\n if not hasattr(self, \"surface_dict\"):\n self.surface_dict = {}\n\n # get centre position of model grid in real world coordinates\n x0, y0 = (\n self.center_point.east + shift_east,\n self.center_point.north + shift_north,\n )\n\n if self.mesh_rotation_angle is None:\n self.mesh_rotation_angle = 0\n\n xg, yg = mtmesh.rotate_mesh(\n self.grid_east,\n self.grid_north,\n [x0, y0],\n self.mesh_rotation_angle,\n return_centre=True,\n )\n if surface_file:\n elev_mg = mtmesh.interpolate_elevation_to_grid(\n xg,\n yg,\n surface_file=surface_file,\n utm_epsg=self.model_epsg,\n datum_epsg=self.center_point.datum_epsg,\n method=method,\n fast=fast,\n )\n elif surface:\n # Always use fast=False when reading from EDI data because\n # we're already providing a subset of the grid.\n elev_mg = mtmesh.interpolate_elevation_to_grid(\n xg,\n yg,\n surface=surface,\n utm_epsg=self.model_epsg,\n datum_epsg=self.center_point.datum_epsg,\n method=method,\n fast=False,\n )\n else:\n raise ValueError(\"'surface_file' or 'surface' must be provided\")\n\n # get a name for surface\n if get_surface_name:\n if surface_file is not None:\n surface_file = Path(surface_file)\n surface_name = surface_file.name\n else:\n ii = 1\n surface_name = \"surface%01i\" % ii\n while surface_name in list(self.surface_dict.keys()):\n ii += 1\n surface_name = \"surface%01i\" % ii\n return elev_mg, surface_name\n else:\n return elev_mg\n\n def add_topography_from_data(\n self,\n interp_method=\"nearest\",\n air_resistivity=1e12,\n topography_buffer=None,\n airlayer_type=\"log_up\",\n ):\n \"\"\"\n Wrapper around add_topography_to_model that allows creating\n a surface model from EDI data. 
The Data grid and station\n elevations will be used to make a 'surface' tuple that will\n be passed to add_topography_to_model so a surface model\n can be interpolated from it.\n\n The surface tuple is of format (lon, lat, elev) containing\n station locations.\n\n Args:\n data_object (mtpy.modeling.ModEM.data.Data): A ModEm data\n object that has been filled with data from EDI files.\n interp_method (str, optional): Same as\n add_topography_to_model.\n air_resistivity (float, optional): Same as\n add_topography_to_model.\n topography_buffer (float): Same as\n add_topography_to_model.\n airlayer_type (str, optional): Same as\n add_topography_to_model.\n \"\"\"\n lon = self.station_locations.longitude.to_numpy()\n lat = self.station_locations.latitude.to_numpy()\n elev = self.station_locations.elevation.to_numpy()\n surface = lon, lat, elev\n self.add_topography_to_model(\n surface=surface,\n interp_method=interp_method,\n air_resistivity=air_resistivity,\n topography_buffer=topography_buffer,\n airlayer_type=airlayer_type,\n )\n\n def add_topography_to_model(\n self,\n topography_file=None,\n surface=None,\n topography_array=None,\n interp_method=\"nearest\",\n air_resistivity=1e12,\n topography_buffer=None,\n airlayer_type=\"log_up\",\n max_elev=None,\n shift_east=0,\n shift_north=0,\n ):\n \"\"\"\n if air_layers is non-zero, will add topo: read in topograph file,\n make a surface model.\n\n Call project_stations_on_topography in the end, which will re-write\n the .dat file.\n\n If n_airlayers is zero, then cannot add topo data, only bathymetry is needed.\n\n :param topography_file: file containing topography (arcgis ascii grid)\n :param topography_array: alternative to topography_file - array of\n elevation values on model grid\n :param interp_method: interpolation method for topography,\n 'nearest', 'linear', or 'cubic'\n :param air_resistivity: resistivity value to assign to air\n :param topography_buffer: buffer around stations to calculate minimum\n and maximum topography value to use for\n meshing\n :param airlayer_type: how to set air layer thickness - options are\n 'constant' for constant air layer thickness,\n or 'log', for logarithmically increasing air\n layer thickness upward\n \"\"\"\n # first, get surface data\n if topography_file:\n self.surface_dict[\"topography\"] = self.interpolate_elevation(\n surface_file=topography_file,\n method=interp_method,\n shift_east=shift_east,\n shift_north=shift_north,\n )\n elif surface:\n self.surface_dict[\"topography\"] = self.interpolate_elevation(\n surface=surface,\n method=interp_method,\n shift_east=shift_east,\n shift_north=shift_north,\n )\n elif topography_array:\n self.surface_dict[\"topography\"] = topography_array\n else:\n raise ValueError(\n \"'topography_file', 'surface' or \"\n + \"topography_array must be provided\"\n )\n\n if self.n_air_layers is None or self.n_air_layers == 0:\n self._logger.warn(\n \"No air layers specified, so will not add air/topography !!!\"\n )\n self._logger.warn(\n \"Only bathymetry will be added below according to the topofile: sea-water low resistivity!!!\"\n )\n\n elif (\n self.n_air_layers > 0\n ): # FZ: new logic, add equal blocksize air layers on top of the simple flat-earth grid\n # get grid centre\n gcx, gcy = [\n np.mean([arr[:-1], arr[1:]], axis=0)\n for arr in (self.grid_east, self.grid_north)\n ]\n # get core cells\n if topography_buffer is None:\n topography_buffer = (\n 5\n * (self.cell_size_east**2 + self.cell_size_north**2)\n ** 0.5\n )\n core_cells = mtmesh.get_station_buffer(\n 
gcx,\n gcy,\n self.station_locations[\"model_east\"],\n self.station_locations[\"model_north\"],\n buf=topography_buffer,\n )\n topo_core = self.surface_dict[\"topography\"][core_cells]\n topo_core_min = max(topo_core.min(), 0)\n\n if airlayer_type == \"log_up\":\n # log increasing airlayers, in reversed order\n new_air_nodes = mtmesh.make_log_increasing_array(\n self.z1_layer,\n topo_core.max() - topo_core_min,\n self.n_air_layers,\n increment_factor=0.999,\n )[::-1]\n elif airlayer_type == \"log_down\":\n # make a new mesh\n n_layers = self.n_layers + self.n_air_layers\n self.nodes_z, z_grid = self.make_z_mesh(n_layers)\n\n # adjust level to topography min\n if max_elev is not None:\n self.grid_z -= max_elev\n ztops = np.where(\n self.surface_dict[\"topography\"] > max_elev\n )\n self.surface_dict[\"topography\"][ztops] = max_elev\n else:\n self.grid_z -= topo_core.max()\n\n elif airlayer_type == \"constant\":\n if max_elev is not None:\n air_cell_thickness = np.ceil(\n (max_elev - topo_core_min) / self.n_air_layers\n )\n else:\n air_cell_thickness = np.ceil(\n (topo_core.max() - topo_core_min) / self.n_air_layers\n )\n new_air_nodes = np.array(\n [air_cell_thickness] * self.n_air_layers\n )\n\n if \"down\" not in airlayer_type:\n # sum to get grid cell locations\n new_airlayers = np.array(\n [\n new_air_nodes[:ii].sum()\n for ii in range(len(new_air_nodes) + 1)\n ]\n )\n # maximum topography cell on the grid\n topo_max_grid = topo_core_min + new_airlayers[-1]\n # round to nearest whole number and convert subtract the max elevation (so that sea level is at topo_core_min)\n new_airlayers = np.around(new_airlayers - topo_max_grid)\n # add new air layers, cut_off some tailing layers to preserve array size.\n self.grid_z = np.concatenate(\n [new_airlayers[:-1], self.grid_z + new_airlayers[-1]],\n axis=0,\n )\n\n self._logger.debug(\"self.grid_z[0:2] {}\".format(self.grid_z[0:2]))\n\n # update the z-centre as the top air layer\n self.grid_center[2] = self.grid_z[0]\n\n # update the resistivity model\n new_res_model = (\n np.ones(\n (\n self.nodes_north.size,\n self.nodes_east.size,\n self.nodes_z.size,\n )\n )\n * self.res_initial_value\n )\n\n if \"down\" not in airlayer_type:\n new_res_model[:, :, self.n_air_layers :] = self.res_model\n\n self.res_model = new_res_model\n\n # assign topography\n top = np.zeros_like(self.surface_dict[\"topography\"]) + self.grid_z[0]\n bottom = -self.surface_dict[\"topography\"]\n self.assign_resistivity_from_surface_data(top, bottom, air_resistivity)\n # assign bathymetry\n self.assign_resistivity_from_surface_data(\n np.zeros_like(top), bottom, 0.3\n )\n\n return\n\n def _validate_extent(self, east, west, south, north, extent_ratio=2.0):\n \"\"\"\n validate the provided ew_ext and ns_ext to make sure the model fits\n within these extents and allows enough space for padding according to\n the extent ratio provided. 
If not, then update ew_ext and ns_ext parameters\n\n \"\"\"\n inner_ew_ext = west - east\n inner_ns_ext = north - south\n\n if self.ew_ext < extent_ratio * inner_ew_ext:\n self._logger.warn(\n \"Provided or default ew_ext not sufficient to fit stations + padding, updating extent\"\n )\n self.ew_ext = np.ceil(extent_ratio * inner_ew_ext)\n\n if self.ns_ext < extent_ratio * inner_ns_ext:\n self._logger.warn(\n \"Provided or default ns_ext not sufficient to fit stations + padding, updating extent\"\n )\n self.ns_ext = np.ceil(extent_ratio * inner_ns_ext)\n\n def _get_xyzres(self, location_type, origin, model_epsg, clip):\n # try getting centre location info from file\n if type(origin) == str:\n try:\n origin = np.loadtxt(origin)\n except:\n print(\n \"Please provide origin as a list, array or tuple or as a valid filename containing this info\"\n )\n origin = [0, 0]\n\n # reshape the data and get grid centres\n x, y, z = [\n np.mean([arr[1:], arr[:-1]], axis=0)\n for arr in [\n self.grid_east + origin[0],\n self.grid_north + origin[1],\n self.grid_z,\n ]\n ]\n xsize, ysize = x.shape[0], y.shape[0]\n x, y, z = np.meshgrid(\n x[clip[0] : xsize - clip[0]], y[clip[1] : ysize - clip[1]], z\n )\n\n # set format for saving data\n fmt = [\"%.1f\", \"%.1f\", \"%.3e\"]\n\n # convert to lat/long if needed\n if location_type == \"LL\":\n if np.any(origin) == 0:\n print(\n \"Warning, origin coordinates provided as zero, output lat/long are likely to be incorrect\"\n )\n # project using epsg_project as preference as it is faster, but if pyproj not installed, use gdal\n\n xp, yp = project_point(x, y, model_epsg, 4326)\n\n # update format to accommodate lat/lon\n fmt[:2] = [\"%.6f\", \"%.6f\"]\n else:\n xp, yp = x, y\n\n resvals = self.res_model[\n clip[1] : ysize - clip[1], clip[0] : xsize - clip[0]\n ]\n\n return xp, yp, z, resvals, fmt\n\n def write_xyzres(\n self,\n savefile=None,\n location_type=\"EN\",\n origin=[0, 0],\n model_epsg=None,\n log_res=False,\n model_utm_zone=None,\n clip=[0, 0],\n ):\n \"\"\"\n save a model file as a space delimited x y z res file\n\n \"\"\"\n xp, yp, z, resvals, fmt = self._get_xyzres(\n location_type, origin, model_epsg, clip\n )\n fmt.insert(2, \"%.1f\")\n xp, yp, z, resvals = (\n xp.flatten(),\n yp.flatten(),\n z.flatten(),\n resvals.flatten(),\n )\n\n np.savetxt(savefile, np.vstack([xp, yp, z, resvals]).T, fmt=fmt)\n\n def write_xyres(\n self,\n save_path=None,\n location_type=\"EN\",\n origin=[0, 0],\n model_epsg=None,\n depth_index=\"all\",\n outfile_basename=\"DepthSlice\",\n log_res=False,\n clip=[0, 0],\n ):\n \"\"\"\n write files containing depth slice data (x, y, res for each depth)\n\n origin = x,y coordinate of zero point of ModEM_grid, or name of file\n containing this info (full path or relative to model files)\n save_path = path to save to, default is the model object save path\n location_type = 'EN' or 'LL' xy points saved as eastings/northings or\n longitude/latitude, if 'LL' need to also provide model_epsg\n model_epsg = epsg number that was used to project the model\n outfile_basename = string for basename for saving the depth slices.\n log_res = True/False - option to save resistivity values as log10\n instead of linear\n clip = number of cells to clip on each of the east/west and north/south edges\n\n \"\"\"\n if save_path is None:\n save_path = Path(self.save_path)\n else:\n save_path = Path(save_path)\n # make a directory to save the files\n save_path = save_path.joinpath(outfile_basename)\n if not save_path.exists():\n save_path.mkdir()\n\n xp, yp, 
z, resvals, fmt = self._get_xyzres(\n location_type, origin, model_epsg, clip\n )\n xp = xp[:, :, 0].flatten()\n yp = yp[:, :, 0].flatten()\n\n # make depth indices into a list\n if depth_index == \"all\":\n depthindices = list(range(z.shape[2]))\n elif np.iterable(depth_index):\n depthindices = np.array(depth_index).astype(int)\n else:\n depthindices = [depth_index]\n\n for k in depthindices:\n fname = save_path.joinpath(\n outfile_basename + \"_%1im.xyz\" % self.grid_z[k]\n )\n\n # get relevant depth slice\n vals = resvals[:, :, k].flatten()\n\n if log_res:\n vals = np.log10(vals)\n fmt[-1] = \"%.3f\"\n data = np.vstack([xp, yp, vals]).T\n\n np.savetxt(fname, data, fmt=fmt)\n\n def write_vtk_file(\n self,\n vtk_save_path=None,\n vtk_fn_basename=\"ModEM_model_res\",\n shift_east=0,\n shift_north=0,\n shift_z=0,\n units=\"km\",\n coordinate_system=\"nez+\",\n label=\"resistivity\",\n ):\n \"\"\"\n Write a VTK file to plot in 3D rendering programs like Paraview\n\n :param vtk_save_path: directory to save vtk file to, defaults to None\n :type vtk_save_path: string or Path, optional\n :param vtk_fn_basename: filename basename of vtk file, note that .vtr\n extension is automatically added, defaults to \"ModEM_stations\"\n :type vtk_fn_basename: string, optional\n :type geographic: boolean, optional\n :param shift_east: shift in east directions in meters, defaults to 0\n :type shift_east: float, optional\n :param shift_north: shift in north direction in meters, defaults to 0\n :type shift_north: float, optional\n :param shift_z: shift in elevation + down in meters, defaults to 0\n :type shift_z: float, optional\n :param units: Units of the spatial grid [ km | m | ft ], defaults to \"km\"\n :type units: string, optional\n :type : string\n :param coordinate_system: coordinate system for the station, either the\n normal MT right-hand coordinate system with z+ down or the sinister\n z- down [ nez+ | enz- ], defaults to nez+\n :return: full path to VTK file\n :rtype: Path\n\n Write VTK file\n >>> model.write_vtk_file(vtk_fn_basename=\"modem_model\")\n\n Write VTK file in geographic coordinates with z+ up\n >>> model.write_vtk_station_file(vtk_fn_basename=\"modem_model\",\n >>> ... 
coordinate_system='enz-')\n \"\"\"\n\n if isinstance(units, str):\n if units.lower() == \"km\":\n scale = 1.0 / 1000.00\n elif units.lower() == \"m\":\n scale = 1.0\n elif units.lower() == \"ft\":\n scale = 3.2808\n elif isinstance(units, (int, float)):\n scale = units\n\n if vtk_save_path is None:\n vtk_fn = self.save_path.joinpath(vtk_fn_basename)\n else:\n vtk_fn = Path(vtk_save_path).joinpath(vtk_fn_basename)\n\n # use cellData, this makes the grid properly as grid is n+1\n if coordinate_system == \"nez+\":\n vtk_x = (self.grid_north + shift_north) * scale\n vtk_y = (self.grid_east + shift_east) * scale\n vtk_z = (self.grid_z + shift_z) * scale\n cell_data = {label: self.res_model}\n\n elif coordinate_system == \"enz-\":\n vtk_y = (self.grid_north + shift_north) * scale\n vtk_x = (self.grid_east + shift_east) * scale\n vtk_z = -1 * (self.grid_z + shift_z) * scale\n cell_data = {label: np.rot90(self.res_model)}\n\n gridToVTK(vtk_fn.as_posix(), vtk_x, vtk_y, vtk_z, cellData=cell_data)\n\n self._logger.info(\"Wrote model file to {}\".format(vtk_fn))\n\n def write_geosoft_xyz(\n self,\n save_fn,\n c_east=0,\n c_north=0,\n c_z=0,\n pad_north=0,\n pad_east=0,\n pad_z=0,\n ):\n \"\"\"\n Write an XYZ file readable by Geosoft\n\n All input units are in meters.\n\n :param save_fn: full path to save file to\n :type save_fn: string or Path\n :param c_east: center point in the east direction, defaults to 0\n :type c_east: float, optional\n :param c_north: center point in the north direction, defaults to 0\n :type c_north: float, optional\n :param c_z: center point elevation, defaults to 0\n :type c_z: float, optional\n :param pad_north: number of cells to cut from the north-south edges, defaults to 0\n :type pad_north: int, optional\n :param pad_east: number of cells to cut from the east-west edges, defaults to 0\n :type pad_east: int, optional\n :param pad_z: number of cells to cut from the bottom, defaults to 0\n :type pad_z: int, optional\n\n\n \"\"\"\n lines = [\n r\"/ ------------------------------------------------------------------------------\",\n r\"/ XYZ IMPORT [01/25/2021]\",\n r\"/ VOXEL [.\\electrical_resistivity.geosoft_voxel]\",\n r\"/ ------------------------------------------------------------------------------\",\n r\"/ X,Y,Z,Data\",\n ]\n\n # --> write model xyz file\n for kk, zz in enumerate(self.grid_z[0:-pad_z]):\n for jj, yy in enumerate(self.grid_east[pad_east:-pad_east]):\n for ii, xx in enumerate(self.grid_north[pad_north:-pad_north]):\n lines.append(\n f\"{yy + c_east:.3f} {xx + c_north:.3f} {-(zz + c_z):.3f} {self.res_model[ii, jj, kk]:.3f}\"\n )\n\n with open(save_fn, \"w\") as fid:\n fid.write(\"\\n\".join(lines))\n\n def write_out_file(\n self, save_fn, geographic_east, geographic_north, geographic_elevation\n ):\n \"\"\"\n will write an .out file for LeapFrog.\n\n Note that y is assumed to be S --> N, e is assumed to be W --> E and\n z is positive upwards. 
This means that index [0, 0, 0] is the\n southwest corner of the first layer.\n\n :param save_fn: full path to save file to\n :type save_fn: string or Path\n :param geographic_east: geographic center in easting (meters)\n :type geographic_east: float\n :param geographic_north: geographic center in northing (meters)\n :type geographic_north: float\n :param geographic_elevation: elevation of geographic center (meters)\n :type geographic_elevation: float\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n # get resistivity model\n if self.res_model is None:\n self.res_model = np.zeros(\n (\n self.nodes_north.size,\n self.nodes_east.size,\n self.nodes_z.size,\n )\n )\n self.res_model[:, :, :] = self.res_initial_value\n\n elif type(self.res_model) in [float, int]:\n self.res_initial_value = self.res_model\n self.res_model = np.zeros(\n (\n self.nodes_north.size,\n self.nodes_east.size,\n self.nodes_z.size,\n )\n )\n self.res_model[:, :, :] = self.res_initial_value\n\n shift_east = (\n geographic_east\n - (\n self.nodes_east[0]\n - self.nodes_east[1] / 2\n - self.grid_center[1] / 2\n )\n ) / 1000.0\n shift_north = (\n geographic_north\n + (\n self.nodes_north[0]\n - self.nodes_north[1] / 2\n - self.grid_center[0] / 2\n )\n ) / 1000.0\n\n shift_elevation = geographic_elevation / 1000.0\n\n # --> write file\n with open(save_fn, \"w\") as ifid:\n ifid.write(\"\\n\")\n ifid.write(\n \"{0:>5}{1:>5}{2:>5}{3:>5} {4}\\n\".format(\n self.nodes_east.size,\n self.nodes_north.size,\n self.nodes_z.size,\n 0,\n \"VAL\",\n )\n )\n\n # write S --> N node block\n for ii, nnode in enumerate(self.nodes_east):\n ifid.write(\"{0:>12.3f}\".format(abs(nnode)))\n\n ifid.write(\"\\n\")\n\n # write W --> E node block\n for jj, enode in enumerate(self.nodes_north):\n ifid.write(\"{0:>12.3f}\".format(abs(enode)))\n ifid.write(\"\\n\")\n\n # write top --> bottom node block\n for kk, zz in enumerate(self.nodes_z):\n ifid.write(\"{0:>12.3f}\".format(abs(zz)))\n ifid.write(\"\\n\")\n\n # write the resistivity in log e format\n write_res_model = self.res_model[::-1, :, :]\n\n # write out the layers from resmodel\n count = 1\n for zz in range(self.nodes_z.size):\n ifid.write(f\"{count}\\n\")\n for nn in range(self.nodes_north.size):\n for ee in range(self.nodes_east.size):\n ifid.write(\n \"{0:>13.5E}\".format(write_res_model[nn, ee, zz])\n )\n ifid.write(\"\\n\")\n count += 1\n\n # write footer\n ifid.write(\"\\n\")\n ifid.write(\"WINGLINK\\n\")\n ifid.write(\" Project (site name)\\n\")\n ifid.write(\" 1 1 (i j block numbers)\\n\")\n ifid.write(\n f\" {shift_east:.3f} {shift_north:.3f} (real world coordinates)\\n\"\n )\n ifid.write(\" 0.0000000E+00 (rotation)\\n\")\n ifid.write(f\" {shift_elevation:.3f} (top elevation)\\n\")\n ifid.write(\"\\n\")\n\n self._logger.info(\"Wrote file to: {0}\".format(save_fn))\n\n def write_ubc_files(self, basename, c_east=0, c_north=0, c_z=0):\n \"\"\"\n Write a UBC .msh and .mod file\n\n :param save_fn: DESCRIPTION\n :type save_fn: TYPE\n :param c_east: DESCRIPTION, defaults to 0\n :type c_east: TYPE, optional\n :param c_north: DESCRIPTION, defaults to 0\n :type c_north: TYPE, optional\n :param c_z: DESCRIPTION, defaults to 0\n :type c_z: TYPE, optional\n :return: DESCRIPTION\n :rtype: TYPE\n\n\n .. 
note:: not complete yet.\n \"\"\"\n\n # write mesh first\n lines = [\n f\"{self.nodes_east.size} {self.nodes_north.size} {self.nodes_z.size}\"\n ]\n lines.append(\n str(self.nodes_east.tolist())\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\",\", \"\")\n )\n lines.append(\n str(self.nodes_north.tolist())\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\",\", \"\")\n )\n lines.append(\n str(self.nodes_z.tolist())\n .replace(\"[\", \"\")\n .replace(\"]\", \"\")\n .replace(\",\", \"\")\n )\n\n with open(self.save_path.joinpath(basename + \".msh\"), \"w\") as fid:\n fid.write(\"\\n\".join(lines))" } ]
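The write_model_file and read_model_file docstrings in the snippet above fully describe the layout of a ModEM model file: a title line, a line carrying the three node counts plus the resistivity scale, three blocks of node widths (S->N, W->E, top->bottom), one block of log-e resistivity values per layer with the north axis reversed, and finally the grid centre and rotation angle. The following is a minimal, self-contained sketch of that layout; the 2 x 2 x 2 grid, the 100 Ohm-m half space and the centre coordinates are illustrative values only, not anything taken from the repository.

import numpy as np

nodes_north = np.array([500.0, 500.0])      # S -> N cell widths (m)
nodes_east = np.array([500.0, 500.0])       # W -> E cell widths (m)
nodes_z = np.array([10.0, 20.0])            # top -> bottom thicknesses (m)
res_model = np.full((2, 2, 2), 100.0)       # uniform 100 Ohm-m half space

lines = [
    "# EXAMPLE MODEL",
    "{0:>5}{1:>5}{2:>5}{3:>5} LOGE".format(
        nodes_north.size, nodes_east.size, nodes_z.size, 0
    ),
    "".join("{0:>12.3f}".format(abs(n)) for n in nodes_north),
    "".join("{0:>12.3f}".format(abs(e)) for e in nodes_east),
    "".join("{0:>12.3f}".format(abs(z)) for z in nodes_z),
]

# one block per layer: each line carries the (reversed) north values for one
# east index, written as log-e resistivities
write_res = np.log(res_model[::-1, :, :])
for zz in range(nodes_z.size):
    lines.append("")
    for ee in range(nodes_east.size):
        lines.append(
            "".join(
                "{0:>13.5E}".format(write_res[nn, ee, zz])
                for nn in range(nodes_north.size)
            )
        )

# grid centre (north, east, z) followed by the mesh rotation angle
lines.append("")
lines.append("{0:>16.3f}{1:>16.3f}{2:>16.3f}".format(-500.0, -500.0, 0.0))
lines.append("{0:>9.3f}".format(0.0))

print("\n".join(lines))

Reading such a file back follows the reverse path shown in read_model_file above, with np.e**res undoing the LOGE scaling before the grids are re-centred.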
from pathlib import Path from loguru import logger from .exception import CovarianceError from .model import Model from pyevtk.hl import gridToVTK import numpy as np
19,948
""" ================== ModEM ================== # Generate files for ModEM # revised by JP 2017 # revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Covariance(object): """ read and write covariance files """ def __init__(self, grid_dimensions=None, **kwargs): self._logger = logger self.grid_dimensions = grid_dimensions self.smoothing_east = 0.3 self.smoothing_north = 0.3 self.smoothing_z = 0.3 self.smoothing_num = 1 self.exception_list = [] self.mask_arr = None self.save_path = Path().cwd() self.fn_basename = "covariance.cov" self._header_str = "\n".join( [ "+{0}+".format("-" * 77), "| This file defines model covariance for a recursive autoregression scheme. |", "| The model space may be divided into distinct areas using integer masks. |", "| Mask 0 is reserved for air; mask 9 is reserved for ocean. Smoothing between |", "| air, ocean and the rest of the model is turned off automatically. You can |", "| also define exceptions to override smoothing between any two model areas. |", "| To turn off smoothing set it to zero. This header is 16 lines long. |", "| 1. Grid dimensions excluding air layers (Nx, Ny, NzEarth) |", "| 2. Smoothing in the X direction (NzEarth real values) |", "| 3. Smoothing in the Y direction (NzEarth real values) |", "| 4. Vertical smoothing (1 real value) |", "| 5. Number of times the smoothing should be applied (1 integer >= 0) |", "| 6. Number of exceptions (1 integer >= 0) |", "| 7. Exceptions in the for e.g. 2 3 0. (to turn off smoothing between 3 & 4) |", "| 8. Two integer layer indices and Nx x Ny block of masks, repeated as needed.|", "+{0}+".format("-" * 77), ] ) for key in list(kwargs.keys()): if hasattr(self, key): setattr(self, key, kwargs[key]) else: self._logger.warn( "Argument {}={} is not supportted thus not been set.".format( key, kwargs[key] ) ) @property def cov_fn(self): return self.save_path.joinpath(self.fn_basename) @cov_fn.setter def cov_fn(self, value): if value is not None: value = Path(value) self.save_path = value.parent self.fn_basename = value.name def write_covariance_file( self, cov_fn=None, save_path=None, fn_basename=None, model_fn=None, sea_water=0.3, air=1e12, ): # """ write a covariance file """ if model_fn is not None:
""" ================== ModEM ================== # Generate files for ModEM # revised by JP 2017 # revised by AK 2017 to bring across functionality from ak branch """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Covariance(object): """ read and write covariance files """ def __init__(self, grid_dimensions=None, **kwargs): self._logger = logger self.grid_dimensions = grid_dimensions self.smoothing_east = 0.3 self.smoothing_north = 0.3 self.smoothing_z = 0.3 self.smoothing_num = 1 self.exception_list = [] self.mask_arr = None self.save_path = Path().cwd() self.fn_basename = "covariance.cov" self._header_str = "\n".join( [ "+{0}+".format("-" * 77), "| This file defines model covariance for a recursive autoregression scheme. |", "| The model space may be divided into distinct areas using integer masks. |", "| Mask 0 is reserved for air; mask 9 is reserved for ocean. Smoothing between |", "| air, ocean and the rest of the model is turned off automatically. You can |", "| also define exceptions to override smoothing between any two model areas. |", "| To turn off smoothing set it to zero. This header is 16 lines long. |", "| 1. Grid dimensions excluding air layers (Nx, Ny, NzEarth) |", "| 2. Smoothing in the X direction (NzEarth real values) |", "| 3. Smoothing in the Y direction (NzEarth real values) |", "| 4. Vertical smoothing (1 real value) |", "| 5. Number of times the smoothing should be applied (1 integer >= 0) |", "| 6. Number of exceptions (1 integer >= 0) |", "| 7. Exceptions in the for e.g. 2 3 0. (to turn off smoothing between 3 & 4) |", "| 8. Two integer layer indices and Nx x Ny block of masks, repeated as needed.|", "+{0}+".format("-" * 77), ] ) for key in list(kwargs.keys()): if hasattr(self, key): setattr(self, key, kwargs[key]) else: self._logger.warn( "Argument {}={} is not supportted thus not been set.".format( key, kwargs[key] ) ) @property def cov_fn(self): return self.save_path.joinpath(self.fn_basename) @cov_fn.setter def cov_fn(self, value): if value is not None: value = Path(value) self.save_path = value.parent self.fn_basename = value.name def write_covariance_file( self, cov_fn=None, save_path=None, fn_basename=None, model_fn=None, sea_water=0.3, air=1e12, ): # """ write a covariance file """ if model_fn is not None:
mod_obj = Model()
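The header string in the Covariance class above spells out the body of a .cov file: grid dimensions excluding air layers, per-layer smoothing in X and Y, a single vertical smoothing value, the number of smoothing passes, the number of exceptions, and then pairs of layer indices each followed by an Nx x Ny block of integer masks (0 reserved for air, 9 for ocean). The short sketch below assembles such a body for a made-up 4 x 3 x 2 grid with a uniform mask of 1; it follows the header description only and is not a verbatim reproduction of write_covariance_file.

import numpy as np

nx, ny, nz = 4, 3, 2                     # Nx, Ny, NzEarth (air layers excluded)
smoothing = 0.3
mask = np.ones((nz, nx, ny), dtype=int)  # one Nx x Ny mask block per layer

body = []
body.append("{0} {1} {2}".format(nx, ny, nz))
body.append(" ".join("{0:.1f}".format(smoothing) for _ in range(nz)))  # X smoothing
body.append(" ".join("{0:.1f}".format(smoothing) for _ in range(nz)))  # Y smoothing
body.append("{0:.1f}".format(smoothing))                               # vertical smoothing
body.append("1")                                                       # smoothing passes
body.append("0")                                                       # no exceptions

for layer in range(nz):
    body.append("{0} {1}".format(layer + 1, layer + 1))                # layer index pair
    for i in range(nx):
        body.append(" ".join(str(v) for v in mask[layer, i]))

print("\n".join(body))

A real file would prepend the 16-line explanatory header kept in _header_str before this body.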
1
2023-10-11 22:24:50+00:00
24k
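write_covariance_file above accepts sea_water and air resistivity keywords alongside an optional model_fn, and the header reserves mask 0 for air and mask 9 for ocean. One plausible way a resistivity model could be turned into such a mask array is sketched below; the 0.9 * air threshold is borrowed from the air-layer counting logic in the earlier model snippet, and the toy model, shapes and thresholds are assumptions for illustration rather than the repository's own mask-building code.

import numpy as np

air = 1e12          # matches the air keyword default shown above
sea_water = 0.3     # matches the sea_water keyword default shown above

res_model = np.full((4, 3, 5), 100.0)   # toy background of 100 Ohm-m
res_model[:, :, 0] = air                # pretend the first layer is air
res_model[0, 0, 1] = sea_water          # and a single cell is ocean

mask_arr = np.ones(res_model.shape, dtype=int)
mask_arr[res_model > 0.9 * air] = 0                 # reserved air mask
mask_arr[np.isclose(res_model, sea_water)] = 9      # reserved ocean mask

print(mask_arr[:, :, 0])    # all zeros: the air layer
print(mask_arr[0, 0, 1])    # 9: the single ocean cell

A mask array of this kind is what the per-layer mask blocks in the covariance body sketched earlier would be filled from.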
weavel-ai/promptmodel-python
promptmodel/llms/llm_proxy.py
[ { "identifier": "LLM", "path": "promptmodel/llms/llm.py", "snippet": "class LLM:\n def __init__(self):\n pass\n\n @classmethod\n def __parse_output_pattern__(\n cls,\n raw_output: Optional[str] = None,\n parsing_type: Optional[ParsingType] = None,\n ) -> ParseResult:\n if parsing_type is None:\n return ParseResult(parsed_outputs={}, error=False, error_log=None)\n if raw_output is None:\n return ParseResult(parsed_outputs={}, error=True, error_log=\"No content\")\n parsing_pattern = get_pattern_by_type(parsing_type)\n whole_pattern = parsing_pattern[\"whole\"]\n parsed_results = re.findall(whole_pattern, raw_output, flags=re.DOTALL)\n parsed_outputs = {}\n error: bool = False\n error_log: str = None\n\n try:\n for parsed_result in parsed_results:\n key = parsed_result[0]\n type_str = parsed_result[1]\n value = convert_str_to_type(parsed_result[2], type_str)\n parsed_outputs[key] = value\n except Exception as e:\n error = True\n error_log = str(e)\n\n return ParseResult(\n parsed_outputs=parsed_outputs,\n error=error,\n error_log=error_log,\n )\n\n def __validate_openai_messages(\n self, messages: List[Dict[str, str]]\n ) -> List[OpenAIMessage]:\n \"\"\"Validate and convert list of dictionaries to list of OpenAIMessage.\"\"\"\n res = []\n for message in messages:\n res.append(OpenAIMessage(**message))\n return res\n\n def run(\n self,\n messages: List[Dict[str, str]],\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> LLMResponse:\n \"\"\"Return the response from openai chat completion.\"\"\"\n response = None\n if functions == []:\n functions = None\n try:\n response: ModelResponse = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n content: Optional[str] = getattr(\n response.choices[0].message, \"content\", None\n )\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[List[ChatCompletionMessageToolCall]] = getattr(\n response.choices[0].message, \"tool_calls\", None\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=content,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n )\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n async def arun(\n self,\n messages: List[Dict[str, str]],\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> LLMResponse:\n \"\"\"Return the response from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n response: ModelResponse = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n content: Optional[str] = getattr(\n response.choices[0].message, \"content\", None\n )\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[ChatCompletionMessageToolCall] = getattr(\n 
response.choices[0].message, \"tool_calls\", None\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=content,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n )\n\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n def stream(\n self,\n messages: List[Dict[str, str]], # input\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> Generator[LLMStreamResponse, None, None]:\n \"\"\"Stream openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n # load_prompt()\n start_time = datetime.datetime.now()\n response = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n for chunk in self.__llm_stream_response_generator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def astream(\n self,\n messages: List[Dict[str, str]],\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n \"\"\"Parse & stream output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n start_time = datetime.datetime.now()\n response = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n async for chunk in self.__llm_stream_response_agenerator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def run_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n \"\"\"Parse and return output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n parsed_success = True\n parse_result = None\n error_log = None\n try:\n response: ModelResponse = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n raw_output = getattr(response.choices[0].message, \"content\", None)\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[List[ChatCompletionMessageToolCall]] = getattr(\n response.choices[0].message, \"tool_calls\", None\n )\n\n if not call_func and not call_tools:\n # function call does not appear in output\n\n parse_result: ParseResult = self.__parse_output_pattern__(\n raw_output, parsing_type\n )\n\n # if output_keys exist & parsed_outputs does not match with 
output_keys -> error\n # if parse_result.error -> error\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or parse_result.error:\n parsed_success = False\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not parse_result.error_log\n else parse_result.error_log\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=raw_output,\n parsed_outputs=parse_result.parsed_outputs if parse_result else None,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n error=not parsed_success,\n error_log=error_log,\n )\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n async def arun_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n \"\"\"Generate openai chat completion asynchronously, and parse the output.\n Example prompt is as follows:\n -----\n Given a topic, you are required to generate a story.\n You must follow the provided output format.\n\n Topic:\n {topic}\n\n Output format:\n [Story]\n ...\n [/Story]\n\n Now generate the output:\n \"\"\"\n if functions == []:\n functions = None\n response = None\n parsed_success = True\n parse_result = None\n error_log = None\n try:\n response: ModelResponse = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n raw_output = getattr(response.choices[0].message, \"content\", None)\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[List[ChatCompletionMessageToolCall]] = getattr(\n response.choices[0].message, \"tool_calls\", None\n )\n\n if not call_func and not call_tools:\n # function call does not appear in output\n parse_result: ParseResult = self.__parse_output_pattern__(\n raw_output, parsing_type\n )\n\n # if output_keys exist & parsed_outputs does not match with output_keys -> error\n # if parse_result.error -> error\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or parse_result.error:\n parsed_success = False\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not parse_result.error_log\n else parse_result.error_log\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=raw_output,\n parsed_outputs=parse_result.parsed_outputs if parse_result else None,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n error=not parsed_success,\n error_log=error_log,\n )\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n def stream_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n 
api_key: Optional[str] = None,\n **kwargs,\n ) -> Generator[LLMStreamResponse, None, None]:\n \"\"\"Parse & stream output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n if parsing_type == ParsingType.COLON.value:\n # cannot stream colon type\n yield LLMStreamResponse(\n error=True, error_log=\"Cannot stream colon type\"\n )\n return\n start_time = datetime.datetime.now()\n response = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n parsed_outputs = {}\n error_occurs = False\n error_log = None\n\n if (functions and len(functions) > 0) or (tools and len(tools) > 0):\n # if function exists, cannot parsing in stream time\n # just stream raw output and parse after stream\n streamed_outputs = {\n \"content\": \"\",\n \"function_call\": None,\n \"api_response\": None,\n }\n response_with_api_res = None\n for chunk in self.__llm_stream_response_generator__(\n messages, response, start_time, functions, tools\n ):\n if chunk.raw_output:\n streamed_outputs[\"content\"] += chunk.raw_output\n if chunk.function_call:\n streamed_outputs[\"function_call\"] = chunk.function_call\n if (\n chunk.api_response\n and getattr(chunk.api_response.choices[0], \"delta\", None)\n is None\n ): # only get the last api_response, not delta response\n streamed_outputs[\"api_response\"] = chunk.api_response\n response_with_api_res = chunk\n else:\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n if not streamed_outputs[\"function_call\"]:\n # if function call does not exist in output\n # able to parse\n parse_result: ParseResult = self.__parse_output_pattern__(\n streamed_outputs[\"content\"], parsing_type\n )\n\n error_occurs = parse_result.error or error_occurs\n error_log = parse_result.error_log if not error_log else error_log\n\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or error_occurs:\n error_occurs = True\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not error_log\n else error_log\n )\n yield LLMStreamResponse(\n api_response=streamed_outputs[\"api_response\"],\n error=True,\n error_log=error_log,\n )\n else:\n response_with_api_res.parsed_outputs = (\n parse_result.parsed_outputs\n )\n yield response_with_api_res\n else:\n yield response_with_api_res\n else:\n if parsing_type is None:\n for chunk in self.__llm_stream_response_generator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n elif parsing_type == ParsingType.DOUBLE_SQUARE_BRACKET.value:\n for chunk in self.__double_type_sp_generator__(\n messages, response, parsing_type, start_time, functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n else:\n for chunk in self.__single_type_sp_generator__(\n messages, response, parsing_type, start_time, functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n if (\n output_keys is 
not None\n and set(parsed_outputs.keys()) != set(output_keys)\n ) and not error_occurs:\n error_occurs = True\n error_log = \"Output keys do not match with parsed output keys\"\n yield LLMStreamResponse(error=True, error_log=error_log)\n\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def astream_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n \"\"\"Parse & stream output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n if parsing_type == ParsingType.COLON.value:\n # cannot stream colon type\n yield LLMStreamResponse(\n error=True, error_log=\"Cannot stream colon type\"\n )\n return\n start_time = datetime.datetime.now()\n response = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n parsed_outputs = {}\n error_occurs = False # error in stream time\n error_log = None\n if (functions and len(functions) > 0) or (tools and len(tools) > 0):\n # if function exists, cannot parsing in stream time\n # just stream raw output and parse after stream\n streamed_outputs = {\n \"content\": \"\",\n \"function_call\": None,\n \"api_response\": None,\n }\n response_with_api_res = None\n async for chunk in self.__llm_stream_response_agenerator__(\n messages, response, start_time, functions, tools\n ):\n if chunk.raw_output:\n streamed_outputs[\"content\"] += chunk.raw_output\n if chunk.function_call:\n streamed_outputs[\"function_call\"] = chunk.function_call\n if (\n chunk.api_response\n and getattr(chunk.api_response.choices[0], \"delta\", None)\n is None\n ):\n streamed_outputs[\"api_response\"] = chunk.api_response\n response_with_api_res = chunk\n else:\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n if not streamed_outputs[\"function_call\"]:\n # if function call does not exist in output\n # able to parse\n parse_result: ParseResult = self.__parse_output_pattern__(\n streamed_outputs[\"content\"], parsing_type\n )\n\n error_occurs = parse_result.error or error_occurs\n error_log = parse_result.error_log if not error_log else error_log\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or error_occurs:\n error_occurs = True\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not error_log\n else error_log\n )\n yield LLMStreamResponse(\n api_response=streamed_outputs[\"api_response\"],\n error=True,\n error_log=error_log,\n )\n else:\n response_with_api_res.parsed_outputs = (\n parse_result.parsed_outputs\n )\n yield response_with_api_res\n else:\n yield response_with_api_res\n else:\n if parsing_type is None:\n async for chunk in self.__llm_stream_response_agenerator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n elif parsing_type == ParsingType.DOUBLE_SQUARE_BRACKET.value:\n async for chunk in self.__double_type_sp_agenerator__(\n messages, response, parsing_type, start_time, 
functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n else:\n async for chunk in self.__single_type_sp_agenerator__(\n messages, response, parsing_type, start_time, functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n\n if (\n output_keys is not None\n and set(parsed_outputs.keys()) != set(output_keys)\n ) and not error_occurs:\n error_occurs = True\n error_log = \"Output keys do not match with parsed output keys\"\n yield LLMStreamResponse(error=True, error_log=error_log)\n\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def make_model_response(\n self,\n chunk: ModelResponse,\n response_ms,\n messages: List[Dict[str, str]],\n raw_output: str,\n functions: Optional[List[Any]] = None,\n function_call: Optional[Dict[str, Any]] = None,\n tools: Optional[List[Any]] = None,\n tool_calls: Optional[List[Dict[str, Any]]] = None,\n ) -> ModelResponse:\n count_start_time = datetime.datetime.now()\n prompt_token: int = num_tokens_for_messages(\n messages=messages, model=chunk[\"model\"]\n )\n completion_token: int = num_tokens_for_messages(\n model=chunk[\"model\"],\n messages=[{\"role\": \"assistant\", \"content\": raw_output}],\n )\n\n if functions and len(functions) > 0:\n functions_token = num_tokens_from_functions_input(\n functions=functions, model=chunk[\"model\"]\n )\n prompt_token += functions_token\n\n if tools and len(tools) > 0:\n tools_token = num_tokens_from_functions_input(\n functions=[tool[\"function\"] for tool in tools], model=chunk[\"model\"]\n )\n prompt_token += tools_token\n # if function_call:\n # function_call_token = num_tokens_from_function_call_output(\n # function_call_output=function_call, model=chunk[\"model\"]\n # )\n # completion_token += function_call_token\n\n count_end_time = datetime.datetime.now()\n logger.debug(\n f\"counting token time : {(count_end_time - count_start_time).total_seconds() * 1000} ms\"\n )\n\n usage = Usage(\n **{\n \"prompt_tokens\": prompt_token,\n \"completion_tokens\": completion_token,\n \"total_tokens\": prompt_token + completion_token,\n }\n )\n\n last_message = Message(\n role=chunk.choices[0].delta.role\n if getattr(chunk.choices[0].delta, \"role\", None)\n else \"assistant\",\n content=raw_output if raw_output != \"\" else None,\n function_call=function_call if function_call else None,\n tool_calls=tool_calls if tool_calls else None,\n )\n choices = [\n Choices(finish_reason=chunk.choices[0].finish_reason, message=last_message)\n ]\n\n res = ModelResponse(\n id=chunk[\"id\"],\n created=chunk[\"created\"],\n model=chunk[\"model\"],\n stream=True,\n )\n res.choices = choices\n res.usage = usage\n res._response_ms = response_ms\n\n return res\n\n def __llm_stream_response_generator__(\n self,\n messages: List[Dict[str, str]],\n response: Generator[ModelResponse, None, None],\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n raw_output = \"\"\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n try:\n for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n 
chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n raw_output += chunk.choices[0].delta.content\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=chunk.choices[0].delta.content,\n )\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def __single_type_sp_generator__(\n self,\n messages: List[Dict[str, str]],\n response: Generator[ModelResponse, None, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield 
LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n if len(keys) == 0:\n break # no key\n active_key, active_type = keys[\n 0\n ] # Updated to unpack both key and type\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[\n 0\n ].replace(end_tag, \"\")\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n elif (\n stream_value.find(end_token) != -1\n ): # if pattern ends = (\"[blah]\" != end_pattern) appeared in buffer\n if (\n active_type == \"List\"\n or active_type == \"Dict\"\n and end_token.find(\"]\") != -1\n ):\n try:\n buffer_dict = json.loads(buffer)\n stream_pause = False\n continue\n except Exception as exception:\n logger.error(exception)\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(\n start_token\n )[0]\n },\n )\n stream_pause = False\n buffer = \"\"\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={active_key: buffer},\n )\n stream_pause = False\n buffer = \"\"\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def __double_type_sp_generator__(\n self,\n messages: List[Dict[str, str]],\n response: Generator[ModelResponse, None, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", 
\"arguments\": \"\"}\n tool_calls = []\n\n for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n if len(keys) == 0:\n break # no key\n active_key, active_type = keys[0]\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[0]\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n elif (\n stream_value.find(end_token) != -1\n ): # if (\"[blah]\" != end_pattern) appeared in buffer\n if (\n buffer.find(end_token + end_token) != -1\n ): # if ]] in buffer -> error\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(start_token)[0]\n },\n )\n buffer = buffer.split(end_token + end_token)[-1]\n stream_pause = False\n break\n else:\n if (\n buffer.find(start_token + start_token) != -1\n ): # if [[ in buffer -> pause\n break\n else:\n # if [ in buffer (== [blah]) -> stream\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n stream_pause = False\n break\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason 
== \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def __llm_stream_response_agenerator__(\n self,\n messages: List[Dict[str, str]],\n response: AsyncGenerator[ModelResponse, None],\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n raw_output = \"\"\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n try:\n async for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def __single_type_sp_agenerator__(\n self,\n messages: List[Dict[str, str]],\n response: AsyncGenerator[ModelResponse, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n async for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value 
is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n if len(keys) == 0:\n break # no key\n\n active_key, active_type = keys[\n 0\n ] # Updated to unpack both key and type\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[\n 0\n ].replace(end_tag, \"\")\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n elif (\n stream_value.find(end_token) != -1\n ): # if pattern ends = (\"[blah]\" != end_pattern) appeared in buffer\n if (\n active_type == \"List\"\n or active_type == \"Dict\"\n and end_token.find(\"]\") != -1\n ):\n try:\n buffer_dict = json.loads(buffer)\n stream_pause = False\n continue\n except Exception as exception:\n logger.error(exception)\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(\n start_token\n )[0]\n },\n )\n stream_pause = False\n buffer = \"\"\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={active_key: buffer},\n )\n stream_pause = False\n buffer = \"\"\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def 
__double_type_sp_agenerator__(\n self,\n messages: List[Dict[str, str]],\n response: AsyncGenerator[ModelResponse, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n async for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n # if len(keys) > 1:\n # yield LLMStreamResponse(\n # error=True,\n # error_log=\"Parsing error : Nested key detected\",\n # )\n # break\n if len(keys) == 0:\n break # no key\n active_key, active_type = keys[0]\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[0]\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n # break\n elif (\n stream_value.find(end_token) != -1\n ): # if (\"[blah]\" != end_pattern) appeared in buffer\n if (\n buffer.find(end_token + end_token) != -1\n ): # if ]] in buffer -> error\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(start_token)[0]\n },\n )\n buffer = buffer.split(end_token + end_token)[-1]\n stream_pause = False\n break\n else:\n if 
(\n buffer.find(start_token + start_token) != -1\n ): # if [[ in buffer -> pause\n break\n else:\n # if [ in buffer (== [blah]) -> stream\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n stream_pause = False\n break\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))" }, { "identifier": "DeployedPrompt", "path": "promptmodel/database/models.py", "snippet": "class DeployedPrompt(BaseModel):\n id = AutoField()\n version_uuid = ForeignKeyField(\n DeployedFunctionModelVersion,\n field=DeployedFunctionModelVersion.uuid,\n backref=\"prompts\",\n on_delete=\"CASCADE\",\n )\n role = CharField()\n step = IntegerField()\n content = TextField()" }, { "identifier": "DeployedFunctionModel", "path": "promptmodel/database/models.py", "snippet": "class DeployedFunctionModel(BaseModel):\n uuid = UUIDField(unique=True, default=uuid4)\n name = CharField()" }, { "identifier": "DeployedFunctionModelVersion", "path": "promptmodel/database/models.py", "snippet": "class DeployedFunctionModelVersion(BaseModel):\n uuid = UUIDField(unique=True, default=uuid4)\n version = IntegerField(null=False)\n from_version = IntegerField(null=True)\n function_model_uuid = ForeignKeyField(\n DeployedFunctionModel,\n field=DeployedFunctionModel.uuid,\n backref=\"versions\",\n on_delete=\"CASCADE\",\n )\n model = CharField()\n is_published = BooleanField(default=False)\n is_ab_test = BooleanField(default=False)\n ratio = FloatField(null=True)\n parsing_type = CharField(\n null=True,\n default=None,\n constraints=[\n Check(\n f\"parsing_type IN ('{ParsingType.COLON.value}', '{ParsingType.SQUARE_BRACKET.value}', '{ParsingType.DOUBLE_SQUARE_BRACKET.value}')\"\n )\n ],\n )\n output_keys = JSONField(null=True, default=None)\n functions = JSONField(default=[])" }, { "identifier": "get_deployed_prompts", "path": "promptmodel/database/crud.py", "snippet": "def get_deployed_prompts(function_model_name: str) -> Tuple[List[DeployedPrompt], str]:\n try:\n with db.atomic():\n versions: List[DeployedFunctionModelVersion] = list(\n DeployedFunctionModelVersion.select()\n .join(DeployedFunctionModel)\n .where(\n DeployedFunctionModelVersion.function_model_uuid\n == DeployedFunctionModel.get(\n DeployedFunctionModel.name == function_model_name\n ).uuid\n )\n )\n prompts: List[DeployedPrompt] = list(\n DeployedPrompt.select()\n .where(\n DeployedPrompt.version_uuid.in_(\n [version.uuid for version in versions]\n )\n )\n .order_by(DeployedPrompt.step.asc())\n )\n # select version by ratio\n selected_version = select_version_by_ratio(\n [version.__data__ for version in versions]\n )\n selected_prompts = list(\n filter(\n lambda prompt: str(prompt.version_uuid.uuid)\n == str(selected_version[\"uuid\"]),\n prompts,\n )\n )\n\n version_details = {\n \"model\": selected_version[\"model\"],\n \"version\" : 
selected_version[\"version\"],\n \"uuid\": selected_version[\"uuid\"],\n \"parsing_type\": selected_version[\"parsing_type\"],\n \"output_keys\": selected_version[\"output_keys\"],\n }\n\n return selected_prompts, version_details\n except Exception as e:\n logger.error(e)\n return None, None" }, { "identifier": "CacheManager", "path": "promptmodel/promptmodel_init.py", "snippet": "class CacheManager:\n _instance = None\n _lock = threading.Lock()\n\n def __new__(cls):\n with cls._lock:\n if cls._instance is None:\n instance = super(CacheManager, cls).__new__(cls)\n instance.last_update_time = 0 # to manage update frequency\n instance.update_interval = 60 * 60 * 6 # seconds, 6 hours\n instance.program_alive = True\n instance.background_tasks = []\n initialize_db()\n atexit.register(instance._terminate)\n asyncio.run(instance.update_cache()) # updae cache first synchronously\n instance.cache_thread = threading.Thread(\n target=instance._run_cache_loop\n )\n instance.cache_thread.daemon = True\n instance.cache_thread.start()\n cls._instance = instance\n return cls._instance\n\n def cache_update_background_task(self, config):\n asyncio.run(update_deployed_db(config))\n\n def _run_cache_loop(self):\n asyncio.run(self._update_cache_periodically())\n\n async def _update_cache_periodically(self):\n while True:\n await asyncio.sleep(self.update_interval) # Non-blocking sleep\n await self.update_cache()\n\n async def update_cache(self):\n # Current time\n current_time = time.time()\n config = read_config()\n\n if not config:\n upsert_config({\"version\": 0}, section=\"project\")\n config = {\"project\": {\"version\": 0}}\n if \"project\" not in config:\n upsert_config({\"version\": 0}, section=\"project\")\n config = {\"project\": {\"version\": 0}}\n\n if \"version\" not in config[\"project\"]:\n upsert_config({\"version\": 0}, section=\"project\")\n config = {\"project\": {\"version\": 0}}\n\n # Check if we need to update the cache\n if current_time - self.last_update_time > self.update_interval:\n # Update cache logic\n try:\n await update_deployed_db(config)\n except:\n # try once more\n await update_deployed_db(config)\n # Update the last update time\n self.last_update_time = current_time\n\n def _terminate(self):\n self.program_alive = False\n\n # async def cleanup_background_tasks(self):\n # for task in self.background_tasks:\n # if not task.done():\n # task.cancel()\n # try:\n # await task\n # except asyncio.CancelledError:\n # pass # 작업이 취소됨" }, { "identifier": "read_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def read_config():\n \"\"\"\n Reads the configuration from the given filename.\n\n :return: A dictionary containing the configuration.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n\n with open(CONFIG_FILE, \"r\") as file:\n config = yaml.safe_load(file) or {}\n return config" }, { "identifier": "upsert_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def upsert_config(new_config: Dict[str, Any], section: str = None):\n \"\"\"\n Upserts the given configuration file with the given configuration.\n\n :param new_config: A dictionary containing the new configuration.\n :param section: The section of the configuration to update.\n \"\"\"\n config = read_config()\n if section:\n config_section = config.get(section, {})\n new_config = {section: merge_dict(config_section, new_config)}\n config = merge_dict(config, new_config)\n # If . 
directory does not exist, create it\n if not os.path.exists(\"./.promptmodel\"):\n os.mkdir(\"./.promptmodel\")\n\n with open(CONFIG_FILE, \"w\") as file:\n yaml.safe_dump(config, file, default_flow_style=False)" }, { "identifier": "select_version_by_ratio", "path": "promptmodel/utils/random_utils.py", "snippet": "def select_version_by_ratio(versions):\n epsilon = 1e-10\n ratios = [version[\"ratio\"] for version in versions]\n\n if not abs(sum(ratios) - 1.0) <= epsilon:\n raise ValueError(f\"Sum of ratios must be 1.0, now {sum(ratios)}\")\n\n cumulative_ratios = []\n cumulative_sum = 0\n for ratio in ratios:\n cumulative_sum += ratio\n cumulative_ratios.append(cumulative_sum)\n\n random_value = random.random()\n for idx, cumulative_ratio in enumerate(cumulative_ratios):\n if random_value <= cumulative_ratio:\n return versions[idx]" }, { "identifier": "logger", "path": "promptmodel/utils/logger.py", "snippet": "def debug(msg: Any, *args):\ndef success(msg: Any, *args):\ndef info(msg: Any, *args):\ndef warning(msg: Any, *args):\ndef error(msg: Any, *args):" }, { "identifier": "run_async_in_sync", "path": "promptmodel/utils/async_utils.py", "snippet": "def run_async_in_sync(coro: Coroutine):\n try:\n loop = asyncio.get_running_loop()\n except RuntimeError: # No running loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n result = loop.run_until_complete(coro)\n # loop.close()\n return result\n\n return loop.run_until_complete(coro)" }, { "identifier": "num_tokens_for_messages_for_each", "path": "promptmodel/utils/token_counting.py", "snippet": "def num_tokens_for_messages_for_each(\n messages: List[Dict[str, str]], model: str = \"gpt-3.5-turbo-0613\"\n) -> List[int]:\n processed_messages = [\n {**message, \"function_call\": str(message[\"function_call\"])}\n if \"function_call\" in message\n else message\n for message in messages\n ]\n processed_messages = [\n {**message, \"tool_calls\": str(message[\"tool_calls\"])}\n if \"tool_calls\" in message\n else message\n for message in processed_messages\n ]\n return [\n token_counter(model=model, messages=[message]) for message in processed_messages\n ]" }, { "identifier": "num_tokens_from_functions_input", "path": "promptmodel/utils/token_counting.py", "snippet": "def num_tokens_from_functions_input(\n functions: Optional[List[Any]] = None, model=\"gpt-3.5-turbo-0613\"\n) -> int:\n \"\"\"Return the number of tokens used by a list of functions.\"\"\"\n if functions is None:\n return 0\n num_tokens = 0\n for function in functions:\n function_tokens = token_counter(model=model, text=function[\"name\"])\n function_tokens += token_counter(model=model, text=function[\"description\"])\n\n if \"parameters\" in function:\n parameters = function[\"parameters\"]\n if \"properties\" in parameters:\n for properties_key in parameters[\"properties\"]:\n function_tokens += token_counter(model=model, text=properties_key)\n v = parameters[\"properties\"][properties_key]\n for field in v:\n if field == \"type\":\n function_tokens += 2\n function_tokens += token_counter(\n model=model, text=v[\"type\"]\n )\n elif field == \"description\":\n function_tokens += 2\n function_tokens += token_counter(\n model=model, text=v[\"description\"]\n )\n elif field == \"enum\":\n function_tokens -= 3\n for o in v[\"enum\"]:\n function_tokens += 3\n function_tokens += token_counter(model=model, text=o)\n else:\n print(f\"Warning: not supported field {field}\")\n function_tokens += 11\n\n num_tokens += function_tokens\n\n num_tokens += 12\n return num_tokens" }, { 
"identifier": "update_dict", "path": "promptmodel/utils/output_utils.py", "snippet": "def update_dict(\n target: Dict[str, str],\n source: Dict[str, str],\n):\n for key, value in source.items():\n if value is not None:\n if key not in target:\n target[key] = value\n else:\n target[key] += value\n return target" }, { "identifier": "AsyncAPIClient", "path": "promptmodel/apis/base.py", "snippet": "class AsyncAPIClient:\n \"\"\"\n A class to represent an Async API request client.\n Used in Deployment stage.\n\n ...\n\n Methods\n -------\n get_headers():\n Generates headers for the API request.\n execute(method=\"GET\", params=None, data=None, json=None, **kwargs):\n Executes the API request.\n \"\"\"\n\n @classmethod\n async def _get_headers(cls, use_cli_key: bool = True) -> Dict:\n \"\"\"\n Reads, decrypts the api_key, and returns headers for API request.\n\n Returns\n -------\n dict\n a dictionary containing the Authorization header\n \"\"\"\n config = read_config()\n if use_cli_key:\n if \"connection\" not in config:\n print(\n \"User not logged in. Please run [violet]prompt login[/violet] first.\"\n )\n exit()\n\n encrypted_key = config[\"connection\"][\"encrypted_api_key\"]\n if encrypted_key is None:\n raise Exception(\"No API key found. Please run 'prompt login' first.\")\n decrypted_key = decrypt_message(encrypted_key)\n else:\n decrypted_key = os.environ.get(\"PROMPTMODEL_API_KEY\")\n if decrypted_key is None:\n raise Exception(\n \"PROMPTMODEL_API_KEY was not found in the current environment.\"\n )\n headers = {\"Authorization\": f\"Bearer {decrypted_key}\"}\n return headers\n\n @classmethod\n async def execute(\n cls,\n path: str,\n method=\"GET\",\n params: Dict = None,\n data: Dict = None,\n json: Dict = None,\n ignore_auth_error: bool = False,\n use_cli_key: bool = True,\n **kwargs,\n ) -> requests.Response:\n \"\"\"\n Executes the API request with the decrypted API key in the headers.\n\n Parameters\n ----------\n method : str, optional\n The HTTP method of the request (default is \"GET\")\n params : dict, optional\n The URL parameters to be sent with the request\n data : dict, optional\n The request body to be sent with the request\n json : dict, optional\n The JSON-encoded request body to be sent with the request\n ignore_auth_error: bool, optional\n Whether to ignore authentication errors (default is False)\n **kwargs : dict\n Additional arguments to pass to the requests.request function\n\n Returns\n -------\n requests.Response\n The response object returned by the requests library\n \"\"\"\n url = f\"{ENDPOINT_URL}{path}\"\n headers = await cls._get_headers(use_cli_key)\n try:\n async with httpx.AsyncClient(http2=True) as _client:\n response = await _client.request(\n method,\n url,\n headers=headers,\n params=params,\n data=data,\n json=json,\n **kwargs,\n )\n if not response:\n print(f\"[red]Error: {response}[/red]\")\n if response.status_code == 200:\n return response\n elif response.status_code == 403:\n if not ignore_auth_error:\n print(\"[red]Authentication failed.[/red]\")\n else:\n print(f\"[red]Error: {response}[/red]\")\n\n return response\n except requests.exceptions.ConnectionError:\n print(\"[red]Could not connect to the Promptmodel API.[/red]\")\n except requests.exceptions.Timeout:\n print(\"[red]The request timed out.[/red]\")\n except Exception as exception:\n print(f\"[red]Error: {exception}[/red]\")" }, { "identifier": "LLMResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMResponse(OpenAIObject):\n api_response: 
Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[FunctionCall] = None\n tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "LLMStreamResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMStreamResponse(OpenAIObject):\n api_response: Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[ChoiceDeltaFunctionCall] = None\n tool_calls: Optional[List[ChoiceDeltaToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "FunctionModelConfig", "path": "promptmodel/types/response.py", "snippet": "class FunctionModelConfig(BaseModel):\n \"\"\"Response Class for FunctionModel.get_config()\n prompts: List[Dict[str, Any]] = []\n each prompt can have role, content, name, function_call, and tool_calls\n version_detail: Dict[str, Any] = {}\n version_detail has \"model\", \"uuid\", \"parsing_type\" and \"output_keys\".\n model: str\n model name (e.g. \"gpt-3.5-turbo\")\n name: str\n name of the FunctionModel.\n version_uuid: str\n version uuid of the FunctionModel.\n version: int\n version id of the FunctionModel.\n parsing_type: Optional[str] = None\n parsing type of the FunctionModel.\n output_keys: Optional[List[str]] = None\n output keys of the FunctionModel.\n \"\"\"\n\n prompts: List[Dict[str, Any]]\n model: str\n name: str\n version_uuid: str\n version: int\n parsing_type: Optional[str] = None\n output_keys: Optional[List[str]] = None" }, { "identifier": "ChatModelConfig", "path": "promptmodel/types/response.py", "snippet": "class ChatModelConfig(BaseModel):\n system_prompt: str\n model: str\n name: str\n version_uuid: str\n version: int\n message_logs: Optional[List[Dict]] = []" }, { "identifier": "UnitConfig", "path": "promptmodel/types/response.py", "snippet": "class UnitConfig(BaseModel):\n \"\"\"Response Class for UnitLogger.get_config().\n Created after calling UnitLogger.log_start()\n name: str\n name of the UnitLogger.\n version_uuid: str\n version uuid of the UnitLogger.\n version: int\n version id of the UnitLogger.\n log_uuid: str\n log_uuid for current trace.\n \"\"\"\n\n name: str\n version_uuid: str\n log_uuid: str\n version: int" }, { "identifier": "PMDetail", "path": "promptmodel/types/response.py", "snippet": "class PMDetail(BaseModel):\n model: str\n name: str\n version_uuid: str\n version: int\n log_uuid: str" }, { "identifier": "ChatLogRequest", "path": "promptmodel/types/request.py", "snippet": "class ChatLogRequest(BaseModel):\n uuid: Optional[str] = None\n message: Dict[str, Any]\n metadata: Optional[Dict] = None\n api_response: Optional[ModelResponse] = None\n\n def __post_init__(\n self,\n ):\n if self.api_response is not None and self.message is None:\n self.message = self.api_response.choices[0].message.model_dump()" } ]
from typing import ( Any, AsyncGenerator, Callable, Dict, Generator, List, Optional, Tuple, Union, ) from uuid import UUID from threading import Thread from rich import print from uuid import uuid4 from litellm.utils import ModelResponse, get_max_tokens from promptmodel.llms.llm import LLM from promptmodel.database.models import ( DeployedPrompt, DeployedFunctionModel, DeployedFunctionModelVersion, ) from promptmodel.database.crud import ( get_deployed_prompts, ) from promptmodel.promptmodel_init import CacheManager from promptmodel.utils.config_utils import read_config, upsert_config from promptmodel.utils.random_utils import select_version_by_ratio from promptmodel.utils import logger from promptmodel.utils.async_utils import run_async_in_sync from promptmodel.utils.token_counting import ( num_tokens_for_messages_for_each, num_tokens_from_functions_input, ) from promptmodel.utils.output_utils import update_dict from promptmodel.apis.base import AsyncAPIClient from promptmodel.types.response import ( LLMResponse, LLMStreamResponse, FunctionModelConfig, ChatModelConfig, UnitConfig, PMDetail, ) from promptmodel.types.request import ChatLogRequest
19,740
def _prepare_call_args( self, prompts: List[Dict[str, str]], version_detail: Dict[str, Any], inputs: Dict[str, Any], kwargs, ): stringified_inputs = {key: str(value) for key, value in inputs.items()} messages = [ { "content": prompt["content"].format(**stringified_inputs), "role": prompt["role"], } for prompt in prompts ] call_args = { "messages": messages, "model": version_detail["model"] if version_detail else None, "parsing_type": version_detail["parsing_type"] if version_detail else None, "output_keys": version_detail["output_keys"] if version_detail else None, } if call_args["parsing_type"] is None: del call_args["parsing_type"] del call_args["output_keys"] if "functions" in kwargs: call_args["functions"] = kwargs["functions"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] return call_args def _prepare_call_args_for_chat( self, messages: List[Dict[str, Any]], version_detail: Dict[str, Any], kwargs, ): call_args = {} token_per_tools = 0 if "functions" in kwargs: call_args["functions"] = kwargs["functions"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["functions"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) if "tools" in kwargs: call_args["tools"] = kwargs["tools"] token_per_tools = num_tokens_from_functions_input( functions=kwargs["tools"], model=version_detail["model"] if version_detail else "gpt-3.5-turbo", ) # truncate messages to make length <= model's max length model_max_tokens = get_max_tokens( model=version_detail["model"] if version_detail else "gpt-3.5-turbo" ) token_per_messages = num_tokens_for_messages_for_each( messages, version_detail["model"] ) token_limit_exceeded = ( sum(token_per_messages) + token_per_tools ) - model_max_tokens if token_limit_exceeded > 0: while token_limit_exceeded > 0: # erase the second oldest message (first one is system prompt, so it should not be erased) if len(messages) == 1: # if there is only one message, Error cannot be solved. Just call LLM and get error response break token_limit_exceeded -= token_per_messages[1] del messages[1] del token_per_messages[1] call_args["messages"] = messages call_args["model"] = version_detail["model"] if version_detail else None if "api_key" in kwargs: call_args["api_key"] = kwargs["api_key"] if "tools" in kwargs: call_args["tools"] = kwargs["tools"] return call_args async def _async_log_to_cloud( self, version_uuid: str, log_uuid: str, inputs: Optional[Dict] = None, api_response: Optional[ModelResponse] = None, parsed_outputs: Optional[Dict] = None, metadata: Optional[Dict] = None, ): config = read_config() if ( "project" in config and "mask_inputs" in config["project"] and config["project"]["mask_inputs"] == True ): inputs = {key: "PRIVATE LOGGING" for key, value in inputs.items()} # Perform the logging asynchronously if api_response: api_response_dict = api_response.model_dump() api_response_dict["response_ms"] = api_response._response_ms api_response_dict["_response_ms"] = api_response._response_ms else: api_response_dict = None run_log_request_body = { "uuid": log_uuid, "api_response": api_response_dict, "inputs": inputs, "parsed_outputs": parsed_outputs, "metadata": metadata, }
class LLMProxy(LLM): def __init__( self, name: str, version: Optional[Union[str, int]] = "deploy", unit_config: Optional[UnitConfig] = None ): super().__init__() self._name = name self.version = version self.unit_config = unit_config def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) log_uuid = str(uuid4()) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item metadata = { "error": error_occurs, "error_log": error_log, } run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) ) return wrapper def _wrap_async_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]: async def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call async_gen with the arguments stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen( **call_args ) log_uuid = str(uuid4()) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None api_response: Optional[ModelResponse] = None async for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log item.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) yield item # # add string_cache in model_response # if api_response: # if "message" not in api_response.choices[0]: # api_response.choices[0].message = {} # if "content" not in api_response.choices[0].message: # api_response.choices[0].message["content"] = string_cache # api_response.choices[0].message["role"] = "assistant" metadata = { "error": error_occurs, "error_log": error_log, } await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], 
inputs=inputs, api_response=api_response, parsed_outputs=dict_cache, metadata=metadata, ) # raise Exception("error_log") return wrapper def _wrap_method(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) ) else: run_async_in_sync( self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return wrapper def _wrap_async_method(self, method: Callable[..., Any]) -> Callable[..., Any]: async def async_wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = await LLMProxy.fetch_prompts( self._name, self.version ) # messages, model, uuid = self._fetch_prompts() call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) # Call the method with the arguments llm_response: LLMResponse = await method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } log_uuid = str(uuid4()) if llm_response.parsed_outputs: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs=llm_response.parsed_outputs, metadata=metadata, ) else: await self._async_log_to_cloud( log_uuid=log_uuid, version_uuid=version_details["uuid"], inputs=inputs, api_response=llm_response.api_response, parsed_outputs={}, metadata=metadata, ) if error_occurs: # delete all promptmodel data in llm_response llm_response.raw_output = None llm_response.parsed_outputs = None llm_response.function_call = None llm_response.pm_detail = PMDetail( model=version_details["model"], name=self._name, version_uuid=str(version_details["uuid"]), version=version_details["version"], log_uuid=log_uuid, ) return llm_response return async_wrapper def _wrap_chat(self, method: Callable[..., Any]) -> Callable[..., Any]: def wrapper(session_uuid: str, **kwargs): instruction, version_details, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self._name, session_uuid, self.version) ) call_args = self._prepare_call_args_for_chat( message_logs, version_details, kwargs ) # Call the method with the arguments llm_response: LLMResponse = method(**call_args) error_occurs = llm_response.error error_log = llm_response.error_log metadata = { "error": error_occurs, "error_log": error_log, } api_response = None if llm_response.api_response: api_response = 
                llm_response.api_response
            log_uuid = str(uuid4())
            run_async_in_sync(
                self._async_chat_log_to_cloud(
                    session_uuid=session_uuid,
                    version_uuid=version_details["uuid"],
                    chat_log_request_list=[
                        ChatLogRequest(
                            message=llm_response.api_response.choices[
                                0
                            ].message.model_dump(),
                            uuid=log_uuid,
                            metadata=metadata,
                            api_response=api_response,
                        )
                    ],
                )
            )

            if error_occurs:
                # delete all promptmodel data in llm_response
                llm_response.raw_output = None
                llm_response.parsed_outputs = None
                llm_response.function_call = None

            llm_response.pm_detail = PMDetail(
                model=version_details["model"],
                name=self._name,
                version_uuid=str(version_details["uuid"]),
                version=version_details["version"],
                log_uuid=log_uuid,
            )

            return llm_response

        return wrapper

    def _wrap_async_chat(self, method: Callable[..., Any]) -> Callable[..., Any]:
        async def async_wrapper(session_uuid: str, **kwargs):
            (
                instruction,
                version_details,
                message_logs,
            ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)

            call_args = self._prepare_call_args_for_chat(
                message_logs, version_details, kwargs
            )

            # Call the method with the arguments
            llm_response: LLMResponse = await method(**call_args)

            error_occurs = llm_response.error
            error_log = llm_response.error_log
            metadata = {
                "error": error_occurs,
                "error_log": error_log,
            }
            api_response = None
            if llm_response.api_response:
                api_response = llm_response.api_response

            log_uuid = str(uuid4())

            await self._async_chat_log_to_cloud(
                session_uuid=session_uuid,
                version_uuid=version_details["uuid"],
                chat_log_request_list=[
                    ChatLogRequest(
                        uuid=log_uuid,
                        message=llm_response.api_response.choices[
                            0
                        ].message.model_dump(),
                        metadata=metadata,
                        api_response=api_response,
                    )
                ],
            )

            if error_occurs:
                # delete all promptmodel data in llm_response
                llm_response.raw_output = None
                llm_response.parsed_outputs = None
                llm_response.function_call = None

            llm_response.pm_detail = PMDetail(
                model=version_details["model"],
                name=self._name,
                version_uuid=str(version_details["uuid"]),
                version=version_details["version"],
                log_uuid=log_uuid,
            )

            return llm_response

        return async_wrapper

    def _wrap_chat_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]:
        def wrapper(session_uuid: str, **kwargs):
            instruction, version_details, message_logs = run_async_in_sync(
                LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)
            )

            call_args = self._prepare_call_args_for_chat(
                message_logs, version_details, kwargs
            )

            # Call the generator with the arguments
            stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args)

            api_response = None
            error_occurs = False
            error_log = None
            log_uuid = str(uuid4())
            for item in stream_response:
                if (
                    item.api_response and "delta" not in item.api_response.choices[0]
                ):  # only get the last api_response, not delta response
                    api_response = item.api_response

                if item.error and not error_occurs:
                    error_occurs = True
                    error_log = item.error_log

                if error_occurs:
                    # delete all promptmodel data in item
                    item.raw_output = None
                    item.parsed_outputs = None
                    item.function_call = None

                item.pm_detail = PMDetail(
                    model=version_details["model"],
                    name=self._name,
                    version_uuid=str(version_details["uuid"]),
                    version=version_details["version"],
                    log_uuid=log_uuid,
                )
                yield item

            metadata = {
                "error": error_occurs,
                "error_log": error_log,
            }
            run_async_in_sync(
                self._async_chat_log_to_cloud(
                    session_uuid=session_uuid,
                    version_uuid=version_details["uuid"],
                    chat_log_request_list=[
                        ChatLogRequest(
                            uuid=log_uuid,
                            message=api_response.choices[0].message.model_dump(),
                            metadata=metadata,
                            api_response=api_response,
                        )
                    ],
                )
            )

        return wrapper

    def _wrap_async_chat_gen(self, async_gen: Callable[..., Any]) -> Callable[..., Any]:
        async def wrapper(session_uuid: str, **kwargs):
            (
                instruction,
                version_details,
                message_logs,
            ) = await LLMProxy.fetch_chat_model(self._name, session_uuid, self.version)

            call_args = self._prepare_call_args_for_chat(
                message_logs, version_details, kwargs
            )

            # Call the generator with the arguments
            stream_response: AsyncGenerator[LLMStreamResponse, None] = async_gen(
                **call_args
            )

            api_response = None
            error_occurs = False
            error_log = None
            log_uuid = str(uuid4())
            async for item in stream_response:
                if (
                    item.api_response and "delta" not in item.api_response.choices[0]
                ):  # only get the last api_response, not delta response
                    api_response = item.api_response

                if item.error and not error_occurs:
                    error_occurs = True
                    error_log = item.error_log

                if error_occurs:
                    # delete all promptmodel data in item
                    item.raw_output = None
                    item.parsed_outputs = None
                    item.function_call = None

                item.pm_detail = PMDetail(
                    model=version_details["model"],
                    name=self._name,
                    version_uuid=str(version_details["uuid"]),
                    version=version_details["version"],
                    log_uuid=log_uuid,
                )
                yield item

            metadata = {
                "error": error_occurs,
                "error_log": error_log,
            }
            await self._async_chat_log_to_cloud(
                session_uuid=session_uuid,
                version_uuid=version_details["uuid"],
                chat_log_request_list=[
                    ChatLogRequest(
                        uuid=log_uuid,
                        message=api_response.choices[0].message.model_dump(),
                        metadata=metadata,
                        api_response=api_response,
                    )
                ],
            )

        return wrapper

    def _prepare_call_args(
        self,
        prompts: List[Dict[str, str]],
        version_detail: Dict[str, Any],
        inputs: Dict[str, Any],
        kwargs,
    ):
        stringified_inputs = {key: str(value) for key, value in inputs.items()}
        messages = [
            {
                "content": prompt["content"].format(**stringified_inputs),
                "role": prompt["role"],
            }
            for prompt in prompts
        ]
        call_args = {
            "messages": messages,
            "model": version_detail["model"] if version_detail else None,
            "parsing_type": version_detail["parsing_type"] if version_detail else None,
            "output_keys": version_detail["output_keys"] if version_detail else None,
        }
        if call_args["parsing_type"] is None:
            del call_args["parsing_type"]
            del call_args["output_keys"]

        if "functions" in kwargs:
            call_args["functions"] = kwargs["functions"]

        if "tools" in kwargs:
            call_args["tools"] = kwargs["tools"]

        if "api_key" in kwargs:
            call_args["api_key"] = kwargs["api_key"]
        return call_args

    def _prepare_call_args_for_chat(
        self,
        messages: List[Dict[str, Any]],
        version_detail: Dict[str, Any],
        kwargs,
    ):
        call_args = {}
        token_per_tools = 0
        if "functions" in kwargs:
            call_args["functions"] = kwargs["functions"]
            token_per_tools = num_tokens_from_functions_input(
                functions=kwargs["functions"],
                model=version_detail["model"] if version_detail else "gpt-3.5-turbo",
            )

        if "tools" in kwargs:
            call_args["tools"] = kwargs["tools"]
            token_per_tools = num_tokens_from_functions_input(
                functions=kwargs["tools"],
                model=version_detail["model"] if version_detail else "gpt-3.5-turbo",
            )

        # truncate messages to make length <= model's max length
        model_max_tokens = get_max_tokens(
            model=version_detail["model"] if version_detail else "gpt-3.5-turbo"
        )
        token_per_messages = num_tokens_for_messages_for_each(
            messages, version_detail["model"]
        )
        token_limit_exceeded = (
            sum(token_per_messages) + token_per_tools
        ) - model_max_tokens
        if token_limit_exceeded > 0:
            while token_limit_exceeded > 0:
                # erase the second oldest message (first one is system prompt, so it should not be erased)
                if len(messages) == 1:
                    # if there is only one message, Error cannot be solved. Just call LLM and get error response
                    break
                token_limit_exceeded -= token_per_messages[1]
                del messages[1]
                del token_per_messages[1]

        call_args["messages"] = messages
        call_args["model"] = version_detail["model"] if version_detail else None

        if "api_key" in kwargs:
            call_args["api_key"] = kwargs["api_key"]

        if "tools" in kwargs:
            call_args["tools"] = kwargs["tools"]

        return call_args

    async def _async_log_to_cloud(
        self,
        version_uuid: str,
        log_uuid: str,
        inputs: Optional[Dict] = None,
        api_response: Optional[ModelResponse] = None,
        parsed_outputs: Optional[Dict] = None,
        metadata: Optional[Dict] = None,
    ):
        config = read_config()
        if (
            "project" in config
            and "mask_inputs" in config["project"]
            and config["project"]["mask_inputs"] == True
        ):
            inputs = {key: "PRIVATE LOGGING" for key, value in inputs.items()}

        # Perform the logging asynchronously
        if api_response:
            api_response_dict = api_response.model_dump()
            api_response_dict["response_ms"] = api_response._response_ms
            api_response_dict["_response_ms"] = api_response._response_ms
        else:
            api_response_dict = None

        run_log_request_body = {
            "uuid": log_uuid,
            "api_response": api_response_dict,
            "inputs": inputs,
            "parsed_outputs": parsed_outputs,
            "metadata": metadata,
        }
res = await AsyncAPIClient.execute(
14
2023-10-09 03:35:44+00:00
24k
cambridgeltl/ClaPS
algs/greedy.py
[ { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: List[str],\n reward_type: str = \"entropy\",\n compute_zscore: bool = True,\n incorrect_coeff: float = 180.0, # lambda_1 in paper\n correct_coeff: float = 200.0, # lambda_2 in paper\n use_bn_calibration: bool = False,\n bn_calibrator: Optional[BatchNormCalibrate] = None,\n template: Optional[str] = None,\n gpu_id: Optional[int] = None,\n ):\n \"\"\"\n Few shot text classification reward (adapted from RLPrompt repository)\n Args:\n task_lm: the string specifying the language model type of the task LM\n is_mask_lm: bool. Whether the LM is masked, or left-to-right.\n compute_zscore: bool. Whether do reward normalization by normalizing the\n mean and standard deviation across the batch.\n incorrect_coeff, correct_coeff:\n num_classes: number of classes in the labels\n verbalizers: a list of verbalizers (for e.g., for sentiment classification)\n reward_type: the type of the reward.\n \"gap\" -- use the one proposed in RLPrompt\n \"ll\" -- use the usual cross entropy loss\n template: the template to organize the queries and prompts.\n default one is [Input][Prompt][MASK].\n default template is adopted when it is not specified.\n bn_calibrator: an optional batch norm calibrator. When provided,\n in inference mode the logits will be first normalised by it first. The\n calibrator must be initialized when passed to this class.\n This class essentially provides the objective function for BO/RL/any other\n prompt optimizer.\n \"\"\"\n super().__init__()\n if torch.cuda.is_available():\n if gpu_id:\n self.device = torch.device(f\"cuda:{gpu_id}\")\n else:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n # self.device = torch.device(\"cpu\")\n self.args = args\n self.task_lm = task_lm\n if is_mask_lm is None:\n # If False, then treat as left-to-right LM\n self.is_mask_lm = True if \"bert\" in self.task_lm else False\n else:\n self.is_mask_lm = is_mask_lm\n assert reward_type in [\"gap\", \"cross_entropy\", \"entropy\"]\n self.reward_type = reward_type\n print(\"Task LM:\", self.task_lm)\n if self.is_mask_lm:\n assert self.task_lm in SUPPORTED_MASK_LMS\n self._tokenizer = AutoTokenizer.from_pretrained(self.task_lm)\n self._generator = AutoModelForMaskedLM.from_pretrained(self.task_lm).to(\n self.device\n )\n else:\n self._generator = T5ForConditionalGeneration.from_pretrained(\n self.task_lm\n ).to(self.device)\n self._tokenizer = AutoTokenizer.from_pretrained(\n self.task_lm, use_fast=False\n )\n\n self.compute_zscore = compute_zscore\n self.incorrect_coeff = incorrect_coeff\n self.correct_coeff = correct_coeff\n self.num_classes = num_classes\n print(\"Num classes:\", self.num_classes)\n self.verbalizers = verbalizers\n print(\"Verbalizers:\", self.verbalizers)\n self.verbalizer_ids = [\n self._tokenizer.convert_tokens_to_ids(v) for v in self.verbalizers\n ]\n print(\"Verbalizer ids:\", self.verbalizer_ids)\n if template is None:\n self.template = self.load_default_template() # prompt templates\n else:\n self.template = template\n self.use_bn_calibration = use_bn_calibration\n self.bn_calibrator = bn_calibrator\n self._counter = 0\n\n def to(self, device):\n self._generator.to(device)\n\n def load_default_template(self) -> List[str]:\n template_dict = {\n \"xnli\": [\n \" {prompt} {sentence_1} {sentence_2} 
Entailment: \", \n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"mnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"snli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \",\n ],\n \"rte\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Textual Entailment: \",\n ],\n \"sst2\": [\n \" {prompt}. Sentence: {sentence_1}, Sentiment: \",\n ],\n \"mrpc\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"qnli\": [\n \" {prompt}. Question: {sentence_1}, Sentence: {sentence_2}, Entailment: \",\n ],\n \"qqp\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"ag_news\": [\n \" {prompt}. Classify the news articles into the categories of World, Sports, Business, and Technology. {sentence_1}: \",\n \"{prompt}\\n\\n{sentence_1}\\n\\nWhich topic is this article about?\\nWorld, Sports, Business, Technology, \",\n ],\n }\n if \"anli\" in self.args[\"dataset_name\"]:\n template = template_dict[\"anli\"][self.args[\"template_id\"]]\n elif (\n \"xnli\" in self.args[\"dataset_name\"]\n or \"americas_nli\" in self.args[\"dataset_name\"]\n ):\n template = template_dict[\"xnli\"][self.args[\"template_id\"]]\n else:\n if self.args[\"dataset_name\"] in template_dict:\n template = template_dict[self.args[\"dataset_name\"]][\n self.args[\"template_id\"]\n ]\n if self.is_mask_lm:\n mask_token = self._tokenizer.mask_token\n print(mask_token)\n simple_list = [\"SetFit/sst2\", \"SetFit/CR\", \"rotten_tomatoes\", \"SetFit/sst5\"]\n long_list = [\"yelp_polarity\", \"yelp_review_full\"]\n hard_list = [\"ag_news\"]\n rl_list = [\n \"rl-agnews\",\n \"rl-cr\",\n \"rl-mr\",\n \"rl-sst-2\",\n \"rl-sst-5\",\n \"rl-yelp-2\",\n \"rl-yelp-5\",\n ]\n if self.args[\"dataset_name\"] in simple_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n elif self.args[\"dataset_name\"] in long_list:\n template = f\" {{prompt}} It was {mask_token}. 
{{sentence_1}}\"\n elif self.args[\"dataset_name\"] in hard_list:\n template = f\" {{prompt}} {mask_token} News: {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in rl_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n return template\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n return self.forward(*args, **kwds)\n\n def forward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n # output_token: Union[List[str], str],\n to_tensor: bool,\n mode: str = \"train\",\n verbose: bool = True,\n accumulate_class: bool = False,\n ) -> Tuple[Union[List[float], torch.Tensor], Dict[str, Any]]:\n \"\"\"\n This computes the reward of the current prompt.\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n assert mode in [\"train\", \"infer\"]\n if mode == \"train\":\n self._counter += 1\n\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n accs: List[float] = []\n confs: List[float] = []\n entropies: List[float] = []\n class_logits: List[torch.Tensor] = []\n\n counter_list = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n quantities_to_log = {}\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n (\n reward,\n acc,\n correct_predictions,\n conf,\n entropy,\n class_logit,\n ) = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n bn_calibrator=self.bn_calibrator if self.use_bn_calibration else None,\n )\n\n rewards.append(reward)\n accs.append(acc.item())\n confs.append(conf.item())\n entropies.append(entropy.item())\n counter_list.append(correct_predictions)\n class_logits.append(class_logit)\n\n # keep track of rewards for z-score normalization\n input_rewards[\"z\"] += [reward.item()]\n\n # Print examples\n if verbose:\n print_strs = [\n \"Accuracy:\",\n acc.item(),\n \"|\",\n \"Reward:\",\n round(reward.item(), 2),\n ]\n print(*print_strs)\n rewards_tensor = torch.stack(rewards)\n accs_tensor = torch.tensor(accs)\n confs_tensor = torch.tensor(confs)\n entropies_tensor = torch.tensor(entropies)\n # compute the expected calibration error (ECE) by accs_tensor and confs_tensor\n ece = torch.abs(accs_tensor - confs_tensor).mean()\n\n # z-score normalization (2nd stage)\n if mode == \"train\" and self.compute_zscore:\n input_reward_means = {k: np.mean(v) for k, v in input_rewards.items()}\n input_reward_stds = {k: np.std(v) for k, v in input_rewards.items()}\n # not source strings\n idx_means = 
torch.tensor(input_reward_means[\"z\"]).float()\n idx_stds = torch.tensor(input_reward_stds[\"z\"]).float()\n rewards_tensor = (rewards_tensor - idx_means) / (idx_stds + 1e-4)\n quantities_to_log[prompt_strings[i]][\"resized_reward\"] = []\n for i in range(rewards_tensor.size(0)):\n quantities_to_log[prompt_strings[i]][\"resized_reward\"].append(\n rewards_tensor[i].item()\n )\n elif mode == \"infer\": # Optional: Predict Val Prompts\n score = rewards_tensor.mean().item()\n if verbose:\n print(f\"Our prompt: {prompt_strings}. Score={score}. Acc={acc}\")\n for pt in prompt_strings:\n print(self._tokenizer.tokenize(pt))\n print(accumulate_class)\n print(\"counter_list\", counter_list)\n print(\"ece\", ece)\n if accumulate_class:\n return (\n prompt_strings,\n rewards_tensor,\n accs_tensor,\n counter_list,\n ece,\n entropies_tensor,\n class_logits, # <- list of tensors. n elements = n prompts\n )\n else:\n return prompt_strings, rewards_tensor, accs_tensor\n\n if to_tensor is True:\n return rewards_tensor, accs_tensor, quantities_to_log\n else:\n return rewards_tensor.tolist(), accs, quantities_to_log\n\n def kl_divergence_row_by_row(self, p, q):\n kl_div = torch.sum(p * torch.log(p / q), dim=1)\n return kl_div\n\n def compute_default_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the probs of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_probs = _compute_probs(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_probs\n\n def compute_default_reward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the rewards of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_reward, _, _, _, _, _ = _compute_reward(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_reward\n\n def compute_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_probs: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_probs = _compute_probs(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n kl = self.kl_divergence_row_by_row(prompt_probs, default_probs)\n kl = torch.sum(kl)\n rewards.append(kl)\n kl_tensor = torch.stack(rewards)\n return kl_tensor\n\n def compute_reward_diff(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_rewards: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_rewards, _, _, _, _, _ = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n reward_diff = prompt_rewards - default_rewards\n reward_diff = torch.sum(reward_diff)\n rewards.append(reward_diff)\n reward_diff_tensor = torch.stack(rewards)\n return reward_diff_tensor\n\n # Adapted from\n # https://huggingface.co/docs/transformers/v4.21.1/en/task_summary#masked-language-modeling\n def _get_mask_token_index(self, input_ids: torch.Tensor) -> np.ndarray:\n mask_token_index = torch.where(input_ids == self._tokenizer.mask_token_id)[1]\n return mask_token_index\n\n def ensure_exactly_one_mask_token(\n self, model_inputs: Dict[str, torch.Tensor]\n ) -> None:\n for input_ids in model_inputs[\"input_ids\"]:\n masked_index = self._get_mask_token_index(input_ids)\n numel = np.prod(masked_index.shape)\n assert numel == 1\n\n @torch.no_grad()\n def _get_logits(self, texts: List[str]) -> torch.Tensor:\n # for MLM, add mask token\n batch_size = len(texts)\n encoded_inputs = self._tokenizer(\n texts,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\",\n add_special_tokens=True,\n )\n decoder_input_ids = (\n torch.ones((batch_size, 1)) * torch.tensor(self._tokenizer.pad_token_id)\n ).int()\n if self.is_mask_lm:\n # self.ensure_exactly_one_mask_token(encoded_inputs) TODO\n token_logits = self._generator(**encoded_inputs.to(self.device)).logits\n mask_token_indices = self._get_mask_token_index(encoded_inputs[\"input_ids\"])\n out_logits = token_logits[range(batch_size), mask_token_indices, :]\n return out_logits\n else:\n token_logits = self._generator(\n input_ids=encoded_inputs[\"input_ids\"].to(self.device),\n decoder_input_ids=decoder_input_ids.to(self.device),\n ).logits\n token_logits = token_logits[:, 0, :]\n return token_logits\n\n def _convert_tokens_to_string(self, tokens: List[List[str]]) -> List[str]:\n return [self._tokenizer.convert_tokens_to_string(s) for s in tokens]\n\n def _format_prompts(\n self,\n source_strs: List[str],\n source_2_strs: List[str],\n prompt_strs: List[str],\n ) -> List[str]:\n return [\n self.template.format(sentence_1=s_1, sentence_2=s_2, prompt=p)\n for s_1, s_2, p in zip(source_strs, source_2_strs, prompt_strs)\n ]" }, { "identifier": "PromptedClassificationDataset", "path": "utils/fsc_datasets.py", "snippet": "class PromptedClassificationDataset:\n def __init__(self, args):\n self.args = args\n self.glue_list = ['sst2', 'rte', 'mrpc', 'qqp', 'mnli', 'qnli']\n self.superglue_list = ['cb', 'copa', 'boolq', 'wic', 'wsc']\n self.nli_3_list = ['mnli', 'xnli', 'anli', 'cb', 'snli']\n if 'xnli' in args['dataset_name']:\n split = self.args['dataset_name'].split('_')[1]\n self.dataset = 
datasets.load_dataset('xnli', split)\n elif args['dataset_name'] in self.glue_list:\n self.dataset = datasets.load_dataset('glue', args['dataset_name'])\n elif 'anli' in args['dataset_name']:\n self.dataset = datasets.load_dataset('anli')\n elif args['dataset_name'] in self.superglue_list:\n self.dataset = datasets.load_dataset('super_glue', args['dataset_name'])\n elif 'rl' in args['dataset_name']:\n pass\n else:\n self.dataset = datasets.load_dataset(args['dataset_name'])\n def get_few_shot_dataset(self, shots: int) -> tuple:\n \"\"\"\n Retrieves a few-shot dataset by selecting a specified number of instances per class from the given dataset.\n \n Args:\n dataset (dict): A dictionary containing the dataset split into \"train\", \"validation\", and \"test\" subsets.\n shots (int): The number of instances to select per class for the few-shot dataset.\n \n Returns:\n tuple: The few-shot training dataset, the original validation dataset, and the original test dataset.\n \"\"\"\n \n if self.args['dataset_name'] == 'mnli':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation_matched']\n test_dataset = self.dataset['test_matched']\n elif self.args['dataset_name'] == 'yelp_polarity' or self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'SetFit/CR' or self.args['dataset_name'] == 'yelp_review_full':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['train']\n test_dataset = self.dataset['test']\n elif 'rl' in self.args['dataset_name']:\n train_dataset = get_rl_data('train', self.args['dataset_name'], self.args['seed'])\n val_dataset = get_rl_data('dev', self.args['dataset_name'], self.args['seed'])\n test_dataset = get_rl_data('test', self.args['dataset_name'], self.args['seed'])\n train_dataset = [x for x in train_dataset]\n val_dataset = [x for x in val_dataset]\n return train_dataset, val_dataset, test_dataset\n elif self.args['dataset_name'] == 'snli':\n train_dataset = [x for x in self.dataset['train'] if x['label'] != -1]\n val_dataset = [x for x in self.dataset['validation'] if x['label'] != -1]\n test_dataset = [x for x in self.dataset['test'] if x['label'] != -1]\n else:\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation']\n test_dataset = self.dataset['test']\n\n train_0 = [x for x in train_dataset if x['label'] == 0][:shots]\n train_1 = [x for x in train_dataset if x['label'] == 1][:shots]\n train_2 = [x for x in train_dataset if x['label'] == 2][:shots]\n train_3 = [x for x in train_dataset if x['label'] == 3][:shots]\n train_4 = [x for x in train_dataset if x['label'] == 4][:shots]\n train_dataset = train_0 + train_1 + train_2 + train_3 + train_4\n if self.args['dataset_name'] in self.glue_list or self.args['dataset_name'] in self.superglue_list:\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n new_val_dataset = val_0 + val_1 + val_2\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n elif self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'yele_review_full':\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n val_3 = [x for x in train_dataset if x['label'] == 3][-shots:]\n val_4 = [x for x in train_dataset if 
x['label'] == 4][-shots:]\n new_val_dataset = val_0 + val_1 + val_2 + val_3 + val_4\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n \n val_0 = [x for x in val_dataset if x['label'] == 0][:shots]\n val_1 = [x for x in val_dataset if x['label'] == 1][:shots]\n val_2 = [x for x in val_dataset if x['label'] == 2][:shots]\n val_dataset = val_0 + val_1 + val_2\n print('train_dataset', train_dataset)\n return train_dataset, val_dataset, test_dataset\n\n def get_verbalizer(self) -> list:\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n verbalizer_predefined = ['yes', 'maybe', 'no']\n elif self.args['dataset_name'] == 'sst2' or self.args['dataset_name'] == 'yelp_polarity':\n verbalizer_predefined = ['negative', 'positive']\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'qnli':\n verbalizer_predefined = ['yes', 'no']\n elif self.args['dataset_name'] == 'mrpc' or self.args['dataset_name'] == 'qqp':\n verbalizer_predefined = ['no', 'yes']\n elif self.args['dataset_name'] == 'boolq':\n verbalizer_predefined = ['no', 'yes']\n elif 'indonlp/NusaX-senti' in self.args['dataset_name']:\n verbalizer_predefined = ['negative', 'neutral', 'positive']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Technology']\n\n special_space = '▁'\n binary_list = ['SetFit/sst2', 'yelp_polarity', 'SetFit/CR', 'rotten_tomatoes']\n rl_binary_list = ['rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-yelp-2']\n if 'bert' in self.args['model_name']:\n special_space = 'Ġ'\n if self.args['dataset_name'] in binary_list:\n verbalizer_predefined = ['terrible', 'great']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Tech']\n elif self.args['dataset_name'] == 'SetFit/sst5' or self.args['dataset_name'] == 'yelp_review_full':\n verbalizer_predefined = ['terrible', 'bad', 'okay', 'good', 'great']\n elif self.args['dataset_name'] in rl_binary_list:\n verbalizer_predefined = ['terrible', 'great']\n\n verbalizer_predefined = [special_space + v for v in verbalizer_predefined]\n return verbalizer_predefined\n \n def get_data(self, data) -> tuple:\n text_label_list = ['yelp_polarity', 'ag_news', 'SetFit/sst5', 'SetFit/CR', 'rotten_tomatoes', \"SetFit/sst2\", 'yelp_review_full']\n rl_list = ['rl-agnews', 'rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-sst-5', 'rl-yelp-2', 'rl-yelp-5']\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n return [d[\"premise\"] for d in data], [d[\"hypothesis\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'sst2':\n return [d[\"sentence\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'mrpc':\n return [d[\"sentence1\"] for d in data], [d[\"sentence2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qnli':\n return [d[\"question\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qqp':\n return [d[\"question1\"] for d in data], [d[\"question2\"] for d in data], [d[\"label\"] 
for d in data]\n elif self.args['dataset_name'] == 'boolq':\n return [d[\"question\"] for d in data], [d[\"passage\"] for d in data], [d[\"label\"] for d in data]\n elif 'indonlp/NusaX-senti' in self.args['dataset_name'] or self.args['dataset_name'] in text_label_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] in rl_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]" }, { "identifier": "BaseTrainer", "path": "algs/base_trainer.py", "snippet": "class BaseTrainer(abc.ABC):\n \"\"\"\n The base trainer class.\n\n Attributes:\n obj_func: the callable function handle for model interfacing.\n logger: an optional logger object.\n bn_calibrator: a batch norm calibration object. Only used in\n testing (not training or validation).\n \"\"\"\n\n def __init__(\n self,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n logger: Optional[Any] = None,\n use_bn_calibrator: bool = False,\n n_samples_bn_calibrator: int = 128,\n ):\n self.obj_func = obj_func\n self.logger = logger\n self.prompt_dataset = prompt_dataset\n\n self.bn_calibrator = BatchNormCalibrate() if use_bn_calibrator else None\n self.n_samples_bn_calibrator = n_samples_bn_calibrator\n\n @abc.abstractmethod\n def train(self, train_data: Iterable[Any]):\n raise NotImplementedError()\n\n def validate(self, val_dataset: Iterable[Any], best_str_list: List[str]) -> str:\n t_dataset = val_dataset\n if self.logger is not None:\n self.logger.info(\"total val dataset length: %s\", len(t_dataset))\n val_acc_list = []\n\n for prompt in best_str_list:\n n_correct = 0\n\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size,\n (batch_idx + 1) * self.eval_batch_size,\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n _, _, batch_acc = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n prompt,\n True,\n \"infer\",\n verbose=True,\n )\n n_correct += batch_acc * len(idx)\n torch.cuda.empty_cache()\n\n if self.logger is not None:\n self.logger.info(\"prompt: %s\", prompt)\n self.logger.info(\"final val acc: %s\", (n_correct / len(t_dataset)))\n val_acc_list.append(float(n_correct / len(t_dataset)))\n # best_prompt = best_str_list[np.argmax(val_acc_list)]\n max_acc = np.max(val_acc_list)\n indices = np.argwhere(val_acc_list == max_acc)\n last_index = indices[-1][0]\n best_prompt = best_str_list[last_index]\n if self.logger is not None:\n self.logger.info(\"val acc list: %s\", val_acc_list)\n self.logger.info(\"best prompt: %s\", best_prompt)\n self.logger.info(\"best prompt acc: %s\", np.max(val_acc_list))\n\n return best_prompt\n\n def test(\n self,\n test_dataset,\n best_prompt,\n bn_calibrate_if_available: bool = True,\n return_logits: bool = False,\n ) -> Tuple[float, Optional[Dict[str, torch.Tensor]]]:\n t_dataset = test_dataset\n if self.logger is not None:\n self.logger.info(\"total test dataset length: %s\", len(t_dataset))\n n_correct = 0\n\n if self.bn_calibrator is not None and bn_calibrate_if_available:\n # select some samples for calibration\n idx_calibrate = np.random.choice(\n len(test_dataset),\n min(len(test_dataset), self.n_samples_bn_calibrator),\n 
replace=False,\n )\n\n calibrate_data = [t_dataset[int(i)] for i in idx_calibrate]\n (\n t_premise_texts,\n t_hypothesis,\n _,\n ) = self.prompt_dataset.get_data(calibrate_data)\n\n # Initialize the bn calibrator\n self.bn_calibrator.train()\n # Get the logits\n calibrate_logits = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n [0] * len(t_premise_texts), # dummy class labels\n best_prompt,\n to_tensor=True,\n mode=\"infer\",\n accumulate_class=True,\n )[-1]\n # Run the prediction logits only through the BN calibrator to obtain\n # running statistics.\n self.bn_calibrator(calibrate_logits[0], flush=True)\n self.bn_calibrator.eval()\n self.obj_func.bn_calibrator = self.bn_calibrator\n else:\n calibrate_logits = None\n\n all_logits: List[torch.Tensor] = []\n all_labels: List[int] = []\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size, (batch_idx + 1) * self.eval_batch_size\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n (\n _,\n _,\n batch_acc,\n _,\n _,\n _,\n class_logits,\n ) = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n best_prompt,\n True,\n \"infer\",\n verbose=True,\n accumulate_class=True,\n )\n n_correct += batch_acc * len(idx)\n torch.cuda.empty_cache()\n if return_logits:\n all_logits.append(class_logits[0])\n all_labels += t_class_labels\n if self.logger is not None:\n self.logger.info(\"prompt: %s\", best_prompt)\n self.logger.info(n_correct)\n self.logger.info(\"final test acc: %s\", (n_correct / len(t_dataset)))\n if return_logits:\n return n_correct / len(t_dataset), {\n \"output_logits\": torch.cat(all_logits),\n \"calibrate_logits\": calibrate_logits,\n \"labels\": all_labels,\n }\n return n_correct / len(t_dataset), None\n\n def manual(\n self,\n test_dataset: Iterable[Any],\n bn_calibrate_if_available: bool = True,\n return_logits: bool = False,\n ) -> Tuple[float, Optional[Dict[str, torch.Tensor]]]:\n t_dataset = test_dataset\n for i in range(self.n_classes):\n test_I = [x for x in t_dataset if x[\"label\"] == i]\n if self.logger is not None:\n self.logger.info(\n \"total test dataset length: %s for class %s\", len(test_I), i\n )\n if self.logger is not None:\n self.logger.info(\"total test dataset length: %s\", len(t_dataset))\n n_correct = 0\n sum_ece = 0\n sum_entropy = 0\n class_correct = collections.Counter((i, 0) for i in range(self.n_classes))\n\n if self.bn_calibrator is not None and bn_calibrate_if_available:\n # select some samples for calibration\n idx_calibrate = np.random.choice(\n len(test_dataset),\n min(len(test_dataset), self.n_samples_bn_calibrator),\n replace=False,\n )\n\n calibrate_data = [t_dataset[int(i)] for i in idx_calibrate]\n (\n t_premise_texts,\n t_hypothesis,\n _,\n ) = self.prompt_dataset.get_data(calibrate_data)\n\n # Initialize the bn calibrator\n self.bn_calibrator.train()\n # Get the logits\n calibrate_logits = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n [0] * len(t_premise_texts), # dummy class labels\n \"\",\n to_tensor=True,\n mode=\"infer\",\n accumulate_class=True,\n )[-1]\n # Run the prediction logits only through the BN calibrator to obtain\n # running statistics.\n self.bn_calibrator(calibrate_logits[0], flush=True)\n self.bn_calibrator.eval()\n self.obj_func.bn_calibrator = 
self.bn_calibrator\n else:\n calibrate_logits = None\n\n all_logits: List[torch.Tensor] = []\n all_labels: List[int] = []\n for batch_idx in range(0, len(t_dataset) // self.eval_batch_size + 1):\n idx = np.arange(\n batch_idx * self.eval_batch_size, (batch_idx + 1) * self.eval_batch_size\n )\n idx = [_idx for _idx in idx if _idx < len(t_dataset)]\n\n if len(idx) == 0:\n break\n\n t_data = [t_dataset[int(i)] for i in idx]\n (\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n ) = self.prompt_dataset.get_data(t_data)\n\n torch.cuda.empty_cache()\n (\n _,\n _,\n batch_acc,\n count_class,\n batch_ece,\n batch_entropy,\n class_logits,\n ) = self.obj_func.forward(\n t_premise_texts,\n t_hypothesis,\n t_class_labels,\n \"\",\n True,\n \"infer\",\n verbose=True,\n accumulate_class=True,\n )\n n_correct += batch_acc * len(idx)\n sum_ece += batch_ece * len(idx)\n sum_entropy += batch_entropy * len(idx)\n class_correct += count_class[0]\n if return_logits:\n all_logits.append(class_logits[0])\n all_labels += t_class_labels\n # print(count_class)\n torch.cuda.empty_cache()\n # print(class_correct)\n if self.logger is not None:\n self.logger.info(\n \"manual prompt test acc: %s\", (float(n_correct) / len(t_dataset))\n )\n self.logger.info(\"count class: %s\", class_correct)\n self.logger.info(\n \"manual prompt test ece percent: %s\",\n (float(sum_ece) / len(t_dataset) * 100),\n )\n self.logger.info(\n \"manual prompt test entropy: %s\", (float(sum_entropy) / len(t_dataset))\n )\n if return_logits:\n return float(n_correct) / len(t_dataset), {\n \"output_logits\": torch.cat(all_logits),\n \"calibrate_logits\": calibrate_logits,\n \"labels\": all_labels,\n }\n return float(n_correct) / len(t_dataset), None" }, { "identifier": "PromptedClassificationDataset", "path": "utils/fsc_datasets.py", "snippet": "class PromptedClassificationDataset:\n def __init__(self, args):\n self.args = args\n self.glue_list = ['sst2', 'rte', 'mrpc', 'qqp', 'mnli', 'qnli']\n self.superglue_list = ['cb', 'copa', 'boolq', 'wic', 'wsc']\n self.nli_3_list = ['mnli', 'xnli', 'anli', 'cb', 'snli']\n if 'xnli' in args['dataset_name']:\n split = self.args['dataset_name'].split('_')[1]\n self.dataset = datasets.load_dataset('xnli', split)\n elif args['dataset_name'] in self.glue_list:\n self.dataset = datasets.load_dataset('glue', args['dataset_name'])\n elif 'anli' in args['dataset_name']:\n self.dataset = datasets.load_dataset('anli')\n elif args['dataset_name'] in self.superglue_list:\n self.dataset = datasets.load_dataset('super_glue', args['dataset_name'])\n elif 'rl' in args['dataset_name']:\n pass\n else:\n self.dataset = datasets.load_dataset(args['dataset_name'])\n def get_few_shot_dataset(self, shots: int) -> tuple:\n \"\"\"\n Retrieves a few-shot dataset by selecting a specified number of instances per class from the given dataset.\n \n Args:\n dataset (dict): A dictionary containing the dataset split into \"train\", \"validation\", and \"test\" subsets.\n shots (int): The number of instances to select per class for the few-shot dataset.\n \n Returns:\n tuple: The few-shot training dataset, the original validation dataset, and the original test dataset.\n \"\"\"\n \n if self.args['dataset_name'] == 'mnli':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation_matched']\n test_dataset = self.dataset['test_matched']\n elif self.args['dataset_name'] == 'yelp_polarity' or self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'SetFit/CR' or self.args['dataset_name'] == 
'yelp_review_full':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['train']\n test_dataset = self.dataset['test']\n elif 'rl' in self.args['dataset_name']:\n train_dataset = get_rl_data('train', self.args['dataset_name'], self.args['seed'])\n val_dataset = get_rl_data('dev', self.args['dataset_name'], self.args['seed'])\n test_dataset = get_rl_data('test', self.args['dataset_name'], self.args['seed'])\n train_dataset = [x for x in train_dataset]\n val_dataset = [x for x in val_dataset]\n return train_dataset, val_dataset, test_dataset\n elif self.args['dataset_name'] == 'snli':\n train_dataset = [x for x in self.dataset['train'] if x['label'] != -1]\n val_dataset = [x for x in self.dataset['validation'] if x['label'] != -1]\n test_dataset = [x for x in self.dataset['test'] if x['label'] != -1]\n else:\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation']\n test_dataset = self.dataset['test']\n\n train_0 = [x for x in train_dataset if x['label'] == 0][:shots]\n train_1 = [x for x in train_dataset if x['label'] == 1][:shots]\n train_2 = [x for x in train_dataset if x['label'] == 2][:shots]\n train_3 = [x for x in train_dataset if x['label'] == 3][:shots]\n train_4 = [x for x in train_dataset if x['label'] == 4][:shots]\n train_dataset = train_0 + train_1 + train_2 + train_3 + train_4\n if self.args['dataset_name'] in self.glue_list or self.args['dataset_name'] in self.superglue_list:\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n new_val_dataset = val_0 + val_1 + val_2\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n elif self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'yele_review_full':\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n val_3 = [x for x in train_dataset if x['label'] == 3][-shots:]\n val_4 = [x for x in train_dataset if x['label'] == 4][-shots:]\n new_val_dataset = val_0 + val_1 + val_2 + val_3 + val_4\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n \n val_0 = [x for x in val_dataset if x['label'] == 0][:shots]\n val_1 = [x for x in val_dataset if x['label'] == 1][:shots]\n val_2 = [x for x in val_dataset if x['label'] == 2][:shots]\n val_dataset = val_0 + val_1 + val_2\n print('train_dataset', train_dataset)\n return train_dataset, val_dataset, test_dataset\n\n def get_verbalizer(self) -> list:\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n verbalizer_predefined = ['yes', 'maybe', 'no']\n elif self.args['dataset_name'] == 'sst2' or self.args['dataset_name'] == 'yelp_polarity':\n verbalizer_predefined = ['negative', 'positive']\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'qnli':\n verbalizer_predefined = ['yes', 'no']\n elif self.args['dataset_name'] == 'mrpc' or self.args['dataset_name'] == 'qqp':\n verbalizer_predefined = ['no', 'yes']\n elif self.args['dataset_name'] == 'boolq':\n verbalizer_predefined = ['no', 'yes']\n elif 'indonlp/NusaX-senti' in 
self.args['dataset_name']:\n verbalizer_predefined = ['negative', 'neutral', 'positive']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Technology']\n\n special_space = '▁'\n binary_list = ['SetFit/sst2', 'yelp_polarity', 'SetFit/CR', 'rotten_tomatoes']\n rl_binary_list = ['rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-yelp-2']\n if 'bert' in self.args['model_name']:\n special_space = 'Ġ'\n if self.args['dataset_name'] in binary_list:\n verbalizer_predefined = ['terrible', 'great']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Tech']\n elif self.args['dataset_name'] == 'SetFit/sst5' or self.args['dataset_name'] == 'yelp_review_full':\n verbalizer_predefined = ['terrible', 'bad', 'okay', 'good', 'great']\n elif self.args['dataset_name'] in rl_binary_list:\n verbalizer_predefined = ['terrible', 'great']\n\n verbalizer_predefined = [special_space + v for v in verbalizer_predefined]\n return verbalizer_predefined\n \n def get_data(self, data) -> tuple:\n text_label_list = ['yelp_polarity', 'ag_news', 'SetFit/sst5', 'SetFit/CR', 'rotten_tomatoes', \"SetFit/sst2\", 'yelp_review_full']\n rl_list = ['rl-agnews', 'rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-sst-5', 'rl-yelp-2', 'rl-yelp-5']\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n return [d[\"premise\"] for d in data], [d[\"hypothesis\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'sst2':\n return [d[\"sentence\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'mrpc':\n return [d[\"sentence1\"] for d in data], [d[\"sentence2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qnli':\n return [d[\"question\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qqp':\n return [d[\"question1\"] for d in data], [d[\"question2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'boolq':\n return [d[\"question\"] for d in data], [d[\"passage\"] for d in data], [d[\"label\"] for d in data]\n elif 'indonlp/NusaX-senti' in self.args['dataset_name'] or self.args['dataset_name'] in text_label_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] in rl_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]" }, { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: List[str],\n reward_type: str = \"entropy\",\n compute_zscore: bool = True,\n incorrect_coeff: float = 180.0, # lambda_1 in paper\n correct_coeff: float = 200.0, # lambda_2 in paper\n use_bn_calibration: bool = False,\n bn_calibrator: Optional[BatchNormCalibrate] = None,\n template: Optional[str] = None,\n gpu_id: Optional[int] = None,\n ):\n \"\"\"\n Few shot text classification reward (adapted from RLPrompt repository)\n Args:\n task_lm: the string specifying the language model type of the task LM\n is_mask_lm: bool. 
Whether the LM is masked, or left-to-right.\n compute_zscore: bool. Whether do reward normalization by normalizing the\n mean and standard deviation across the batch.\n incorrect_coeff, correct_coeff:\n num_classes: number of classes in the labels\n verbalizers: a list of verbalizers (for e.g., for sentiment classification)\n reward_type: the type of the reward.\n \"gap\" -- use the one proposed in RLPrompt\n \"ll\" -- use the usual cross entropy loss\n template: the template to organize the queries and prompts.\n default one is [Input][Prompt][MASK].\n default template is adopted when it is not specified.\n bn_calibrator: an optional batch norm calibrator. When provided,\n in inference mode the logits will be first normalised by it first. The\n calibrator must be initialized when passed to this class.\n This class essentially provides the objective function for BO/RL/any other\n prompt optimizer.\n \"\"\"\n super().__init__()\n if torch.cuda.is_available():\n if gpu_id:\n self.device = torch.device(f\"cuda:{gpu_id}\")\n else:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n # self.device = torch.device(\"cpu\")\n self.args = args\n self.task_lm = task_lm\n if is_mask_lm is None:\n # If False, then treat as left-to-right LM\n self.is_mask_lm = True if \"bert\" in self.task_lm else False\n else:\n self.is_mask_lm = is_mask_lm\n assert reward_type in [\"gap\", \"cross_entropy\", \"entropy\"]\n self.reward_type = reward_type\n print(\"Task LM:\", self.task_lm)\n if self.is_mask_lm:\n assert self.task_lm in SUPPORTED_MASK_LMS\n self._tokenizer = AutoTokenizer.from_pretrained(self.task_lm)\n self._generator = AutoModelForMaskedLM.from_pretrained(self.task_lm).to(\n self.device\n )\n else:\n self._generator = T5ForConditionalGeneration.from_pretrained(\n self.task_lm\n ).to(self.device)\n self._tokenizer = AutoTokenizer.from_pretrained(\n self.task_lm, use_fast=False\n )\n\n self.compute_zscore = compute_zscore\n self.incorrect_coeff = incorrect_coeff\n self.correct_coeff = correct_coeff\n self.num_classes = num_classes\n print(\"Num classes:\", self.num_classes)\n self.verbalizers = verbalizers\n print(\"Verbalizers:\", self.verbalizers)\n self.verbalizer_ids = [\n self._tokenizer.convert_tokens_to_ids(v) for v in self.verbalizers\n ]\n print(\"Verbalizer ids:\", self.verbalizer_ids)\n if template is None:\n self.template = self.load_default_template() # prompt templates\n else:\n self.template = template\n self.use_bn_calibration = use_bn_calibration\n self.bn_calibrator = bn_calibrator\n self._counter = 0\n\n def to(self, device):\n self._generator.to(device)\n\n def load_default_template(self) -> List[str]:\n template_dict = {\n \"xnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \", \n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"mnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. 
Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"snli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \",\n ],\n \"rte\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Textual Entailment: \",\n ],\n \"sst2\": [\n \" {prompt}. Sentence: {sentence_1}, Sentiment: \",\n ],\n \"mrpc\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"qnli\": [\n \" {prompt}. Question: {sentence_1}, Sentence: {sentence_2}, Entailment: \",\n ],\n \"qqp\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"ag_news\": [\n \" {prompt}. Classify the news articles into the categories of World, Sports, Business, and Technology. {sentence_1}: \",\n \"{prompt}\\n\\n{sentence_1}\\n\\nWhich topic is this article about?\\nWorld, Sports, Business, Technology, \",\n ],\n }\n if \"anli\" in self.args[\"dataset_name\"]:\n template = template_dict[\"anli\"][self.args[\"template_id\"]]\n elif (\n \"xnli\" in self.args[\"dataset_name\"]\n or \"americas_nli\" in self.args[\"dataset_name\"]\n ):\n template = template_dict[\"xnli\"][self.args[\"template_id\"]]\n else:\n if self.args[\"dataset_name\"] in template_dict:\n template = template_dict[self.args[\"dataset_name\"]][\n self.args[\"template_id\"]\n ]\n if self.is_mask_lm:\n mask_token = self._tokenizer.mask_token\n print(mask_token)\n simple_list = [\"SetFit/sst2\", \"SetFit/CR\", \"rotten_tomatoes\", \"SetFit/sst5\"]\n long_list = [\"yelp_polarity\", \"yelp_review_full\"]\n hard_list = [\"ag_news\"]\n rl_list = [\n \"rl-agnews\",\n \"rl-cr\",\n \"rl-mr\",\n \"rl-sst-2\",\n \"rl-sst-5\",\n \"rl-yelp-2\",\n \"rl-yelp-5\",\n ]\n if self.args[\"dataset_name\"] in simple_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n elif self.args[\"dataset_name\"] in long_list:\n template = f\" {{prompt}} It was {mask_token}. {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in hard_list:\n template = f\" {{prompt}} {mask_token} News: {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in rl_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n return template\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n return self.forward(*args, **kwds)\n\n def forward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n # output_token: Union[List[str], str],\n to_tensor: bool,\n mode: str = \"train\",\n verbose: bool = True,\n accumulate_class: bool = False,\n ) -> Tuple[Union[List[float], torch.Tensor], Dict[str, Any]]:\n \"\"\"\n This computes the reward of the current prompt.\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n assert mode in [\"train\", \"infer\"]\n if mode == \"train\":\n self._counter += 1\n\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n accs: List[float] = []\n confs: List[float] = []\n entropies: List[float] = []\n class_logits: List[torch.Tensor] = []\n\n counter_list = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n quantities_to_log = {}\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n (\n reward,\n acc,\n correct_predictions,\n conf,\n entropy,\n class_logit,\n ) = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n bn_calibrator=self.bn_calibrator if self.use_bn_calibration else None,\n )\n\n rewards.append(reward)\n accs.append(acc.item())\n confs.append(conf.item())\n entropies.append(entropy.item())\n counter_list.append(correct_predictions)\n class_logits.append(class_logit)\n\n # keep track of rewards for z-score normalization\n input_rewards[\"z\"] += [reward.item()]\n\n # Print examples\n if verbose:\n print_strs = [\n \"Accuracy:\",\n acc.item(),\n \"|\",\n \"Reward:\",\n round(reward.item(), 2),\n ]\n print(*print_strs)\n rewards_tensor = torch.stack(rewards)\n accs_tensor = torch.tensor(accs)\n confs_tensor = torch.tensor(confs)\n entropies_tensor = torch.tensor(entropies)\n # compute the expected calibration error (ECE) by accs_tensor and confs_tensor\n ece = torch.abs(accs_tensor - confs_tensor).mean()\n\n # z-score normalization (2nd stage)\n if mode == \"train\" and self.compute_zscore:\n input_reward_means = {k: np.mean(v) for k, v in input_rewards.items()}\n input_reward_stds = {k: np.std(v) for k, v in input_rewards.items()}\n # not source strings\n idx_means = torch.tensor(input_reward_means[\"z\"]).float()\n idx_stds = torch.tensor(input_reward_stds[\"z\"]).float()\n rewards_tensor = (rewards_tensor - idx_means) / (idx_stds + 1e-4)\n quantities_to_log[prompt_strings[i]][\"resized_reward\"] = []\n for i in range(rewards_tensor.size(0)):\n quantities_to_log[prompt_strings[i]][\"resized_reward\"].append(\n rewards_tensor[i].item()\n )\n elif mode == \"infer\": # Optional: Predict Val Prompts\n score = rewards_tensor.mean().item()\n if verbose:\n print(f\"Our prompt: {prompt_strings}. Score={score}. Acc={acc}\")\n for pt in prompt_strings:\n print(self._tokenizer.tokenize(pt))\n print(accumulate_class)\n print(\"counter_list\", counter_list)\n print(\"ece\", ece)\n if accumulate_class:\n return (\n prompt_strings,\n rewards_tensor,\n accs_tensor,\n counter_list,\n ece,\n entropies_tensor,\n class_logits, # <- list of tensors. 
n elements = n prompts\n )\n else:\n return prompt_strings, rewards_tensor, accs_tensor\n\n if to_tensor is True:\n return rewards_tensor, accs_tensor, quantities_to_log\n else:\n return rewards_tensor.tolist(), accs, quantities_to_log\n\n def kl_divergence_row_by_row(self, p, q):\n kl_div = torch.sum(p * torch.log(p / q), dim=1)\n return kl_div\n\n def compute_default_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the probs of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_probs = _compute_probs(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_probs\n\n def compute_default_reward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the rewards of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_reward, _, _, _, _, _ = _compute_reward(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_reward\n\n def compute_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_probs: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_probs = _compute_probs(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n kl = self.kl_divergence_row_by_row(prompt_probs, default_probs)\n kl = torch.sum(kl)\n rewards.append(kl)\n kl_tensor = torch.stack(rewards)\n return kl_tensor\n\n def compute_reward_diff(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_rewards: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_rewards, _, _, _, _, _ = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n reward_diff = prompt_rewards - default_rewards\n reward_diff = torch.sum(reward_diff)\n rewards.append(reward_diff)\n reward_diff_tensor = torch.stack(rewards)\n return reward_diff_tensor\n\n # Adapted from\n # https://huggingface.co/docs/transformers/v4.21.1/en/task_summary#masked-language-modeling\n def _get_mask_token_index(self, input_ids: torch.Tensor) -> np.ndarray:\n mask_token_index = torch.where(input_ids == self._tokenizer.mask_token_id)[1]\n return mask_token_index\n\n def ensure_exactly_one_mask_token(\n self, model_inputs: Dict[str, torch.Tensor]\n ) -> None:\n for input_ids in model_inputs[\"input_ids\"]:\n masked_index = self._get_mask_token_index(input_ids)\n numel = np.prod(masked_index.shape)\n assert numel == 1\n\n @torch.no_grad()\n def _get_logits(self, texts: List[str]) -> torch.Tensor:\n # for MLM, add mask token\n batch_size = len(texts)\n encoded_inputs = self._tokenizer(\n texts,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\",\n add_special_tokens=True,\n )\n decoder_input_ids = (\n torch.ones((batch_size, 1)) * torch.tensor(self._tokenizer.pad_token_id)\n ).int()\n if self.is_mask_lm:\n # self.ensure_exactly_one_mask_token(encoded_inputs) TODO\n token_logits = self._generator(**encoded_inputs.to(self.device)).logits\n mask_token_indices = self._get_mask_token_index(encoded_inputs[\"input_ids\"])\n out_logits = token_logits[range(batch_size), mask_token_indices, :]\n return out_logits\n else:\n token_logits = self._generator(\n input_ids=encoded_inputs[\"input_ids\"].to(self.device),\n decoder_input_ids=decoder_input_ids.to(self.device),\n ).logits\n token_logits = token_logits[:, 0, :]\n return token_logits\n\n def _convert_tokens_to_string(self, tokens: List[List[str]]) -> List[str]:\n return [self._tokenizer.convert_tokens_to_string(s) for s in tokens]\n\n def _format_prompts(\n self,\n source_strs: List[str],\n source_2_strs: List[str],\n prompt_strs: List[str],\n ) -> List[str]:\n return [\n self.template.format(sentence_1=s_1, sentence_2=s_2, prompt=p)\n for s_1, s_2, p in zip(source_strs, source_2_strs, prompt_strs)\n ]" } ]
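The `kl_divergence_row_by_row` and `compute_kl` snippets above compute, for each evaluation example, the KL divergence between the class distribution induced by a candidate prompt and the distribution induced by the empty (naive) prompt, then sum over examples. Below is a minimal, self-contained sketch of that row-wise computation; the tensor shapes and the softmax step are assumptions added for illustration, not part of the original snippet.

```python
import torch

def kl_divergence_row_by_row(p: torch.Tensor, q: torch.Tensor) -> torch.Tensor:
    # Per-row KL(p || q), matching the snippet above.
    # p and q are assumed to be probability distributions of shape
    # (num_examples, num_classes).
    return torch.sum(p * torch.log(p / q), dim=1)

# Hypothetical verbalizer logits for 4 examples and 3 classes.
prompt_logits = torch.randn(4, 3)
default_logits = torch.randn(4, 3)

prompt_probs = torch.softmax(prompt_logits, dim=-1)
default_probs = torch.softmax(default_logits, dim=-1)

kl_per_example = kl_divergence_row_by_row(prompt_probs, default_probs)
kl_total = torch.sum(kl_per_example)  # scalar reward term, as in compute_kl
print(kl_per_example.shape, kl_total.item())
```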
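The reward loop in the same snippet also tracks per-prompt accuracy and confidence, estimates the expected calibration error (ECE) as the mean absolute gap between them, and, in training mode, z-score normalizes the stacked rewards. The sketch below reproduces those two post-processing steps with made-up reward/accuracy/confidence values.

```python
import torch

# Hypothetical per-prompt statistics collected during the reward loop.
rewards_tensor = torch.tensor([1.2, 0.4, -0.3, 0.9])
accs_tensor = torch.tensor([0.80, 0.55, 0.40, 0.75])
confs_tensor = torch.tensor([0.90, 0.60, 0.70, 0.80])

# Expected calibration error as used in the snippet: mean |accuracy - confidence|.
ece = torch.abs(accs_tensor - confs_tensor).mean()

# Second-stage z-score normalization of the rewards; the small epsilon
# avoids division by zero. The snippet uses np.std, i.e. the population
# standard deviation, hence unbiased=False here.
mean = rewards_tensor.mean()
std = rewards_tensor.std(unbiased=False)
rewards_normalized = (rewards_tensor - mean) / (std + 1e-4)

print(f"ECE={ece.item():.3f}", rewards_normalized.tolist())
```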
import random import numpy as np from typing import Any, Optional from rewards.text_classification_reward import ( PromptedClassificationReward, ) from utils.fsc_datasets import PromptedClassificationDataset from .base_trainer import BaseTrainer from utils.fsc_datasets import PromptedClassificationDataset from rewards.text_classification_reward import PromptedClassificationReward
18,703
class GreedyTrainer(BaseTrainer): def __init__( self,
class GreedyTrainer(BaseTrainer): def __init__( self,
obj_func: PromptedClassificationReward,
4
2023-10-08 12:39:44+00:00
24k
clessig/atmorep
atmorep/core/trainer.py
[ { "identifier": "AtmoRep", "path": "atmorep/core/atmorep_model.py", "snippet": "class AtmoRep( torch.nn.Module) :\n\n def __init__(self, cf) :\n '''Constructor'''\n \n super( AtmoRep, self).__init__()\n\n self.cf = cf\n\n ###################################################\n def create( self, devices, load_pretrained=True) :\n '''Create network'''\n\n cf = self.cf\n self.devices = devices\n size_token_info = 6\n self.fields_coupling_idx = []\n\n self.fields_index = {}\n for ifield, field_info in enumerate(cf.fields) :\n self.fields_index[ field_info[0] ] = ifield \n \n # # embedding network for global/auxiliary token infos\n # TODO: only for backward compatibility, remove\n self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)\n torch.nn.init.constant_( self.embed_token_info.weight, 0.0)\n\n self.embeds_token_info = torch.nn.ModuleList()\n for ifield, field_info in enumerate( cf.fields) :\n \n self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net))\n \n if len(field_info[1]) > 4 and load_pretrained :\n # TODO: inconsistent with embeds_token_info -> version that can handle both\n # we could imply use the file name: embed_token_info vs embeds_token_info\n name = 'AtmoRep' + '_embed_token_info'\n mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1]))\n self.embeds_token_info[-1].load_state_dict( mloaded)\n print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) )\n else :\n # initalization\n torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0)\n self.embeds_token_info[-1].bias.data.fill_(0.0)\n\n # embedding and encoder\n\n self.embeds = torch.nn.ModuleList()\n self.encoders = torch.nn.ModuleList()\n self.masks = torch.nn.ParameterList()\n\n for field_idx, field_info in enumerate(cf.fields) : \n\n # learnabl class token\n if cf.learnable_mask :\n mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True))\n self.masks.append( mask.to(devices[0]))\n else :\n self.masks.append( None)\n\n # encoder\n self.encoders.append( TransformerEncoder( cf, field_idx, True).create())\n # load pre-trained model if specified\n if len(field_info[1]) > 4 and load_pretrained :\n self.load_block( field_info, 'encoder', self.encoders[-1])\n self.embeds.append( self.encoders[-1].embed)\n\n # indices of coupled fields for efficient access in forward\n self.fields_coupling_idx.append( [field_idx])\n for field_coupled in field_info[1][2] : \n if 'axial' in cf.encoder_att_type :\n self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] )\n else :\n for _ in range(cf.coupling_num_heads_per_field) :\n self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] )\n\n # decoder \n\n self.decoders = torch.nn.ModuleList()\n self.field_pred_idxs = []\n for field in cf.fields_prediction :\n\n for ifield, field_info in enumerate(cf.fields) : \n if field_info[0] == field[0] :\n self.field_pred_idxs.append( ifield)\n break\n\n self.decoders.append( TransformerDecoder( cf, field_info ) )\n # load pre-trained model if specified\n if len(field_info[1]) > 4 and load_pretrained :\n self.load_block( field_info, 'decoder', self.decoders[-1])\n\n # tail networks\n \n self.tails = torch.nn.ModuleList()\n for ifield, field in enumerate(cf.fields_prediction) :\n\n field_idx = self.field_pred_idxs[ifield]\n field_info = cf.fields[field_idx]\n self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create())\n # 
load pre-trained model if specified\n if len(field_info[1]) > 4 and load_pretrained:\n self.load_block( field_info, 'tail', self.tails[-1])\n\n # set devices\n\n for field_idx, field_info in enumerate(cf.fields) :\n # find determined device, use default if nothing specified\n device = self.devices[0]\n if len(field_info[1]) > 3 :\n assert field_info[1][3] < 4, 'Only single node model parallelism supported'\n assert field_info[1][3] < len(devices), 'Per field device id larger than max devices'\n device = self.devices[ field_info[1][3] ]\n # set device\n if self.masks[field_idx] != None :\n self.masks[field_idx].to(device)\n self.embeds[field_idx].to(device)\n self.encoders[field_idx].to(device)\n\n for field_idx, field in enumerate(cf.fields_prediction) :\n field_info = cf.fields[ self.field_pred_idxs[field_idx] ]\n device = self.devices[0]\n if len(field_info[1]) > 3 :\n device = self.devices[ field_info[1][3] ]\n self.decoders[field_idx].to(device)\n self.tails[field_idx].to(device)\n\n # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal\n self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove\n self.embeds_token_info.to(devices[0])\n\n self.checkpoint = identity\n if cf.grad_checkpointing :\n self.checkpoint = checkpoint_wrapper\n\n return self\n\n ###################################################\n def load_block( self, field_info, block_name, block ) :\n\n # name = self.__class__.__name__ + '_' + block_name + '_' + field_info[0]\n name = 'AtmoRep_' + block_name + '_' + field_info[0]\n\n b_loaded = torch.load( get_model_filename(name, field_info[1][4][0], field_info[1][4][1]))\n\n # in coupling mode, proj_out of attention heads needs separate treatment: only the pre-trained\n # part can be loaded\n keys_del = []\n for name, param in block.named_parameters():\n if 'proj_out' in name :\n for k in b_loaded.keys() :\n if name == k :\n if param.shape[0] != param.shape[1] : # non-square proj_out indicate deviation from pre-training\n with torch.no_grad() :\n # load pre-trained part\n param[ : , : b_loaded[k].shape[1] ] = b_loaded[k]\n # initalize remaining part to small random value\n param[ : , b_loaded[k].shape[1] : ] = 0.01 * torch.rand( param.shape[0],\n param.shape[1] - b_loaded[k].shape[1])\n keys_del += [ k ]\n for k in keys_del :\n del b_loaded[k]\n\n # use strict=False so that differing blocks, e.g. through coupling, are ignored\n mkeys, _ = block.load_state_dict( b_loaded, False)\n\n # missing keys = keys that are not pre-trained are initalized to small value\n [mkeys.remove(k) for k in keys_del] # remove proj_out keys so that they are not over-written\n [utils.init_weights_uniform( block.state_dict()[k], 0.01) for k in mkeys]\n\n print( 'Loaded {} for {} from id = {} (ignoring/missing {} elements).'.format( block_name,\n field_info[0], field_info[1][4][0], len(mkeys) ) )\n\n ###################################################\n @staticmethod\n def load( model_id, devices, cf = None, epoch = -2, load_pretrained=False) :\n '''Load network from checkpoint'''\n\n if not cf : \n cf = utils.Config()\n cf.load_json( model_id)\n\n model = AtmoRep( cf).create( devices, load_pretrained=False)\n mloaded = torch.load( utils.get_model_filename( model, model_id, epoch) )\n mkeys, _ = model.load_state_dict( mloaded, False )\n\n if len(mkeys) > 0 :\n print( f'Loaded AtmoRep: ignoring {len(mkeys)} elements: {mkeys}')\n\n # TODO: remove, only for backward \n if model.embeds_token_info[0].weight.abs().max() == 0. 
:\n model.embeds_token_info = torch.nn.ModuleList()\n\n return model\n \n ###################################################\n def save( self, epoch = -2) :\n '''Save network '''\n\n # save entire network\n torch.save( self.state_dict(), utils.get_model_filename( self, self.cf.wandb_id, epoch) )\n\n # save parts also separately\n\n # name = self.__class__.__name__ + '_embed_token_info'\n # torch.save( self.embed_token_info.state_dict(),\n # utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n name = self.__class__.__name__ + '_embeds_token_info'\n torch.save( self.embeds_token_info.state_dict(),\n utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n for ifield, enc in enumerate(self.encoders) :\n name = self.__class__.__name__ + '_encoder_' + self.cf.fields[ifield][0]\n torch.save( enc.state_dict(), utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n for ifield, dec in enumerate(self.decoders) :\n name = self.__class__.__name__ + '_decoder_' + self.cf.fields_prediction[ifield][0]\n torch.save( dec.state_dict(), utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n for ifield, tail in enumerate(self.tails) :\n name = self.__class__.__name__ + '_tail_' + self.cf.fields_prediction[ifield][0]\n torch.save( tail.state_dict(), utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n ###################################################\n def forward( self, xin) :\n '''Evaluate network'''\n\n # embedding\n cf = self.cf\n fields_embed = self.get_fields_embed(xin)\n \n # attention maps (if requested)\n atts = [ [] for _ in cf.fields ]\n\n # encoder\n embeds_layers = [[] for i in self.field_pred_idxs]\n for ib in range(self.cf.encoder_num_layers) :\n fields_embed, att = self.forward_encoder_block( ib, fields_embed) \n [embeds_layers[idx].append( fields_embed[i]) for idx,i in enumerate(self.field_pred_idxs)]\n [atts[i].append( att[i]) for i,_ in enumerate(cf.fields) ]\n \n # encoder-decoder coupling / token transformations\n (decoders_in, embeds_layers) = self.encoder_to_decoder( embeds_layers)\n\n preds = []\n for idx,i in enumerate(self.field_pred_idxs) :\n \n # decoder\n token_seq_embed, att = self.decoders[idx]( (decoders_in[idx], embeds_layers[idx]) )\n \n # tail net\n tail_in = self.decoder_to_tail( idx, token_seq_embed)\n pred = self.checkpoint( self.tails[idx], tail_in)\n \n preds.append( pred)\n [atts[i].append( a) for a in att]\n\n return preds, atts\n\n ###################################################\n def forward_encoder_block( self, iblock, fields_embed) :\n ''' evaluate one block (attention and mlp) '''\n\n # double buffer for commutation-invariant result (w.r.t evaluation order of transformers)\n fields_embed_cur, atts = [], []\n\n # attention heads\n for ifield in range( len(fields_embed)) :\n d = fields_embed[ifield].device\n fields_in =[fields_embed[i].to(d,non_blocking=True) for i in self.fields_coupling_idx[ifield]]\n # unpack list in argument for checkpointing\n y, att = self.checkpoint( self.encoders[ifield].heads[iblock], *fields_in)\n fields_embed_cur.append( y)\n atts.append( att)\n \n # MLPs \n for ifield in range( len(fields_embed)) :\n fields_embed_cur[ifield] = self.checkpoint( self.encoders[ifield].mlps[iblock], \n fields_embed_cur[ifield] )\n \n return fields_embed_cur, atts\n\n ###################################################\n \n def get_fields_embed( self, xin ) :\n cf = self.cf\n if 0 == len(self.embeds_token_info) : # TODO: only for backward compatibility, remove\n emb_net_ti = self.embed_token_info\n return 
[prepare_token( field_data, emb_net, emb_net_ti, cf.with_cls )\n for fidx,(field_data,emb_net) in enumerate(zip( xin, self.embeds))]\n else :\n embs_net_ti = self.embeds_token_info\n return [prepare_token( field_data, emb_net, embs_net_ti[fidx], cf.with_cls )\n for fidx,(field_data,emb_net) in enumerate(zip( xin, self.embeds))]\n \n ###################################################\n\n def get_attention( self, xin) : \n\n cf = self.cf\n attn = []\n fields_embed = self.get_fields_embed(xin)\n #either accumulated attention or last layer attention:\n blocks = list(range(self.cf.encoder_num_layers)) if cf.attention_mode == 'accum' else [self.cf.encoder_num_layers-1]\n for idx, ifield in enumerate(self.field_pred_idxs) : \n d = fields_embed[ifield].device\n fields_in =[fields_embed[i].to(d,non_blocking=True) for i in self.fields_coupling_idx[ifield]]\n attn_field = self.encoders[ifield].heads[blocks[0]].get_attention(fields_in)\n if cf.attention_mode == 'accum':\n for iblock in blocks[1:]:\n attn_layer = self.encoders[ifield].heads[iblock].get_attention(fields_in)\n attn_field = attn_field + attn_layer\n attn_field = torch.sum(attn_field, dim = 0, keepdim=True)\n attn.append(attn_field)\n# print(\"att FINAL\", ifield, len(attn), attn[0].shape)\n return attn" }, { "identifier": "AtmoRepData", "path": "atmorep/core/atmorep_model.py", "snippet": "class AtmoRepData( torch.nn.Module) :\n\n def __init__( self, net) :\n '''Wrapper class for AtmoRep that handles data loading'''\n\n super( AtmoRepData, self).__init__()\n \n self.data_loader_test = None\n self.data_loader_train = None\n self.data_loader_iter = None\n\n self.net = net\n\n # ensure that all data loaders have the same seed and hence load the same data\n self.rng_seed = net.cf.rng_seed \n if not self.rng_seed :\n self.rng_seed = int(torch.randint( 100000000, (1,))) \n \n ###################################################\n def load_data( self, mode : NetMode, batch_size = -1, num_loader_workers = -1) :\n '''Load data'''\n\n cf = self.net.cf\n \n if batch_size < 0 :\n batch_size = cf.batch_size_max\n if num_loader_workers < 0 :\n num_loader_workers = cf.num_loader_workers\n\n if mode == NetMode.train :\n self.data_loader_train = self._load_data( self.dataset_train, batch_size, num_loader_workers)\n elif mode == NetMode.test :\n batch_size = cf.batch_size_test\n self.data_loader_test = self._load_data( self.dataset_test, batch_size, num_loader_workers)\n else : \n assert False\n\n ###################################################\n def _load_data( self, dataset, batch_size, num_loader_workers) :\n '''Private implementation for load'''\n\n dataset.load_data( batch_size)\n\n loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, \n 'num_workers': num_loader_workers, 'pin_memory': True}\n data_loader = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) \n\n return data_loader\n\n ###################################################\n def set_data( self, mode : NetMode, times_pos, batch_size = -1, num_loader_workers = -1) :\n\n cf = self.net.cf\n if batch_size < 0 :\n batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test\n \n dataset = self.dataset_train if mode == NetMode.train else self.dataset_test\n dataset.set_data( times_pos, batch_size)\n\n self._set_data( dataset, mode, batch_size, num_loader_workers)\n\n ###################################################\n def set_global( self, mode : NetMode, times, batch_size = -1, num_loader_workers = -1) :\n\n cf = 
self.net.cf\n if batch_size < 0 :\n batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test\n \n dataset = self.dataset_train if mode == NetMode.train else self.dataset_test\n dataset.set_global( times, batch_size, cf.token_overlap)\n\n self._set_data( dataset, mode, batch_size, num_loader_workers)\n\n ###################################################\n def set_location( self, mode : NetMode, pos, years, months, num_t_samples_per_month, \n batch_size = -1, num_loader_workers = -1) :\n\n cf = self.net.cf\n if batch_size < 0 :\n batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test\n \n dataset = self.dataset_train if mode == NetMode.train else self.dataset_test\n dataset.set_location( pos, years, months, num_t_samples_per_month, batch_size)\n\n self._set_data( dataset, mode, batch_size, num_loader_workers)\n\n ###################################################\n def _set_data( self, dataset, mode : NetMode, batch_size = -1, loader_workers = -1) :\n '''Private implementation for set_data, set_global'''\n\n cf = self.net.cf\n if loader_workers < 0 :\n loader_workers = cf.num_loader_workers\n\n loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, \n 'num_workers': loader_workers, 'pin_memory': True}\n \n if mode == NetMode.train :\n self.data_loader_train = torch.utils.data.DataLoader( dataset, **loader_params, \n sampler = None)\n elif mode == NetMode.test :\n self.data_loader_test = torch.utils.data.DataLoader( dataset, **loader_params, \n sampler = None)\n else :\n assert False\n\n ###################################################\n def normalizer( self, field, vl_idx) :\n\n if isinstance( field, str) :\n for fidx, field_info in enumerate(self.cf.fields) :\n if field == field_info[0] :\n break\n assert fidx < len(self.cf.fields), 'invalid field'\n normalizer = self.dataset_train.datasets[fidx].normalizer\n\n elif isinstance( field, int) :\n normalizer = self.dataset_train.datasets[field][vl_idx].normalizer\n\n else :\n assert False, 'invalid argument type (has to be index to cf.fields or field name)'\n\n return normalizer\n\n ###################################################\n def mode( self, mode : NetMode) :\n \n if mode == NetMode.train :\n self.data_loader_iter = iter(self.data_loader_train)\n self.net.train()\n elif mode == NetMode.test :\n self.data_loader_iter = iter(self.data_loader_test)\n self.net.eval()\n else :\n assert False\n\n self.cur_mode = mode\n\n ###################################################\n def len( self, mode : NetMode) :\n if mode == NetMode.train :\n return len(self.data_loader_train)\n elif mode == NetMode.test :\n return len(self.data_loader_test)\n else :\n assert False\n\n ###################################################\n def next( self) :\n return next(self.data_loader_iter)\n\n ###################################################\n def forward( self, xin) :\n pred = self.net.forward( xin)\n return pred\n\n ###################################################\n def get_attention( self, xin): #, field_idx) :\n attn = self.net.get_attention( xin) #, field_idx)\n return attn\n\n ###################################################\n def create( self, pre_batch, devices, create_net = True, pre_batch_targets = None,\n load_pretrained=True) :\n\n if create_net :\n self.net.create( devices, load_pretrained)\n\n self.pre_batch = pre_batch\n self.pre_batch_targets = pre_batch_targets\n\n cf = self.net.cf\n self.dataset_train = MultifieldDataSampler( cf.data_dir, 
cf.years_train, cf.fields,\n batch_size = cf.batch_size_start,\n num_t_samples = cf.num_t_samples,\n num_patches_per_t = cf.num_patches_per_t_train,\n num_load = cf.num_files_train,\n pre_batch = self.pre_batch,\n rng_seed = self.rng_seed,\n file_shape = cf.file_shape,\n smoothing = cf.data_smoothing,\n level_type = cf.level_type,\n file_format = cf.file_format,\n month = cf.month,\n time_sampling = cf.time_sampling,\n geo_range = cf.geo_range_sampling,\n fields_targets = cf.fields_targets,\n pre_batch_targets = self.pre_batch_targets )\n \n self.dataset_test = MultifieldDataSampler( cf.data_dir, cf.years_test, cf.fields,\n batch_size = cf.batch_size_test,\n num_t_samples = cf.num_t_samples,\n num_patches_per_t = cf.num_patches_per_t_test,\n num_load = cf.num_files_test,\n pre_batch = self.pre_batch,\n rng_seed = self.rng_seed,\n file_shape = cf.file_shape,\n smoothing = cf.data_smoothing,\n level_type = cf.level_type,\n file_format = cf.file_format,\n month = cf.month,\n time_sampling = cf.time_sampling,\n geo_range = cf.geo_range_sampling,\n lat_sampling_weighted = cf.lat_sampling_weighted,\n fields_targets = cf.fields_targets,\n pre_batch_targets = self.pre_batch_targets )\n\n return self" }, { "identifier": "prepare_batch_BERT_multifield", "path": "atmorep/training/bert.py", "snippet": "def prepare_batch_BERT_multifield( cf, rngs, fields, BERT_strategy, fields_data) :\n \n fields_tokens_masked_idx = [[] for _ in fields_data]\n fields_tokens_masked_idx_list = [[] for _ in fields_data]\n fields_targets = [[] for _ in fields_data]\n sources = [[] for _ in fields_data]\n token_infos = [[] for _ in fields_data]\n\n if not BERT_strategy :\n BERT_strategy = cf.BERT_strategy\n\n if BERT_strategy == 'BERT' :\n bert_f = prepare_batch_BERT_field\n elif BERT_strategy == 'forecast' :\n bert_f = prepare_batch_BERT_forecast_field\n elif BERT_strategy == 'temporal_interpolation' :\n bert_f = prepare_batch_BERT_temporal_field\n elif BERT_strategy == 'forecast_1shot' :\n bert_f = prepare_batch_BERT_forecast_field_1shot\n elif BERT_strategy == 'identity' :\n bert_f = prepare_batch_BERT_identity_field\n elif BERT_strategy == 'totalmask' :\n bert_f = prepare_batch_BERT_totalmask_field\n else :\n assert False\n\n # # advance randomly to avoid issues with parallel data loaders that naively duplicate rngs\n # delta = torch.randint( 0, 1000, (1,)).item()\n # [rng.bit_generator.advance( delta) for rng in rngs]\n\n if cf.BERT_window :\n # window size has to be multiple of two due to the variable token sizes (the size is \n # however currently restricted to differ by exactly a factor of two only)\n size_t = int(rngs[0].integers( 2, fields[0][3][0]+1, 1)[0] / 2.) * 2 \n size_lat = int(rngs[0].integers( 2, fields[0][3][1]+1, 1)[0] / 2.) * 2\n size_lon = int(rngs[0].integers( 2, fields[0][3][2]+1, 1)[0] / 2.) 
* 2\n\n rng_idx = 1\n for ifield, data_field in enumerate(fields_data) :\n for ilevel, (field_data, token_info) in enumerate(data_field) :\n\n tok_size = fields[ifield][4]\n field_data = tokenize( field_data, tok_size )\n field_data_shape = field_data.shape\n \n # cut neighborhood for current batch\n if cf.BERT_window :\n # adjust size based on token size so that one has a fixed size window in physical space\n cur_size_t = int(size_t * fields[ifield][3][0] / fields[0][3][0])\n cur_size_lat = int(size_lat * fields[ifield][3][1] / fields[0][3][1])\n cur_size_lon = int(size_lon * fields[ifield][3][2] / fields[0][3][2])\n # define indices\n idx_t_s = field_data.shape[1] - cur_size_t\n idx_lat_s = field_data.shape[2] - cur_size_lat\n idx_lon_s = field_data.shape[3] - cur_size_lon\n # cut\n field_data = field_data[ :, idx_t_s:, idx_lat_s:, idx_lon_s:]\n field_data = field_data.contiguous()\n # for token info first recover space-time shape\n token_info = token_info.reshape( list(field_data_shape[0:4]) + [token_info.shape[-1]]) \n token_info = token_info[ :, idx_t_s:, idx_lat_s:, idx_lon_s:]\n token_info = torch.flatten( token_info, 1, -2)\n token_info = token_info.contiguous()\n \n # no masking for static fields or if masking rate = 0\n if fields[ifield][1][0] > 0 and fields[ifield][5][0] > 0. :\n\n ret = bert_f( cf, ifield, field_data, token_info, rngs[rng_idx])\n (field_data, token_info, target, tokens_masked_idx, tokens_masked_idx_list) = ret\n \n if target != None :\n fields_targets[ifield].append( target)\n fields_tokens_masked_idx[ifield].append( tokens_masked_idx)\n fields_tokens_masked_idx_list[ifield].append( tokens_masked_idx_list)\n\n rng_idx += 1\n\n sources[ifield].append( field_data.unsqueeze(1) )\n token_infos[ifield].append( token_info )\n\n # merge along vertical level\n sources[ifield] = torch.cat( sources[ifield], 1)\n token_infos[ifield] = torch.cat( token_infos[ifield], 1)\n # merge along vertical level, for target we have level, batch, ... ordering \n fields_targets[ifield] = torch.cat( fields_targets[ifield],0) \\\n if len(fields_targets[ifield]) > 0 else fields_targets[ifield]\n\n return (sources, token_infos, fields_targets, fields_tokens_masked_idx,\n fields_tokens_masked_idx_list)" }, { "identifier": "positional_encoding_harmonic", "path": "atmorep/transformer/transformer_base.py", "snippet": "def positional_encoding_harmonic( x, num_levels, num_tokens, with_cls = False) :\n '''space time harmonic positional encoding'''\n\n dim_embed = x.shape[-1]\n dev = x.get_device()\n \n # num_tokens = x.shape[-3:-1]\n # len_token_seq = num_levels * np.prod(num_tokens)\n # pe = torch.zeros( len_token_seq, dim_embed, device=dev)\n # position = torch.arange( 0, len_token_seq).unsqueeze(1)\n # div = torch.exp(torch.arange( 0, dim_embed, 2) * -(math.log(1000) / dim_embed))\n\n # pe[:, 0::2] = torch.sin(position * div)\n # pe[:, 1::2] = torch.cos(position * div)\n # pe = pe.unsqueeze(0)\n\n # x += pe.reshape( x[0].shape )\n\n\n idx = torch.arange( np.prod( x.shape[1:-1]), device=dev)\n num_tokens_t_lat_lon = np.prod( num_tokens)\n num_tokens_lat_lon = num_tokens[1] * num_tokens[2]\n idxs_v = (idx / num_tokens_t_lat_lon).int()\n # idxs_v = num_tokens_t_lat_lon\n temp = torch.remainder( idx, num_tokens_t_lat_lon)\n idxs_t = (temp / num_tokens_lat_lon).int()\n temp = torch.remainder( idx, num_tokens_lat_lon)\n idxs_lat = (temp / num_tokens[1]).int()\n idxs_lon = torch.remainder( temp, num_tokens[2])\n\n pe = torch.zeros( idx.shape[0], dim_embed, device=dev)\n xs = (2. 
* np.pi * torch.arange( 0, dim_embed, 2, device=dev) / dim_embed)\n pe[:, 0::2] = 0.5 * torch.sin( torch.outer( 8 * idxs_lat, xs) ) \\\n + torch.sin( torch.outer( idxs_t, xs) )\n pe[:, 1::2] = 0.5 * torch.cos( torch.outer( 8 * idxs_lon, xs) ) \\\n + torch.cos( torch.outer( idxs_v , xs) )\n if with_cls :\n x[:,1:] += pe.reshape( x[0,1:].shape)\n else :\n x += pe.reshape( x[0].shape)\n\n return x" }, { "identifier": "shape_to_str", "path": "atmorep/utils/utils.py", "snippet": "def shape_to_str( shape) :\n ret ='{}'.format( list(shape)).replace(' ', '').replace(',','_').replace('(','s_').replace(')','')\n ret = ret.replace('[','s_').replace(']','')\n return ret" }, { "identifier": "relMSELoss", "path": "atmorep/utils/utils.py", "snippet": "def relMSELoss( pred, target = None) :\n val = torch.mean( (pred - target) * (pred - target)) / torch.mean( target * target)\n return val" }, { "identifier": "Gaussian", "path": "atmorep/utils/utils.py", "snippet": "def Gaussian( x, mu=0., std_dev=1.) :\n # return (1 / (std_dev*np.sqrt(2.*np.pi))) * torch.exp( -0.5 * (x-mu)*(x-mu) / (std_dev*std_dev))\n # unnormalized Gaussian where maximum is one\n return torch.exp( -0.5 * (x-mu)*(x-mu) / (std_dev*std_dev))" }, { "identifier": "CRPS", "path": "atmorep/utils/utils.py", "snippet": "def CRPS( y, mu, std_dev) :\n # see Eq. A2 in S. Rasp and S. Lerch. Neural networks for postprocessing ensemble weather forecasts. Monthly Weather Review, 146(11):3885 – 3900, 2018.\n c1 = np.sqrt(1./np.pi)\n t1 = 2. * erf( (y-mu) / std_dev) - 1.\n t2 = 2. * Gaussian( (y-mu) / std_dev)\n val = std_dev * ( (y-mu)/std_dev * t1 + t2 - c1 )\n return val" }, { "identifier": "NetMode", "path": "atmorep/utils/utils.py", "snippet": "class NetMode( Enum) :\n indeterminate = 0\n train = 1\n test = 2" }, { "identifier": "sgn_exp", "path": "atmorep/utils/utils.py", "snippet": "def sgn_exp( x ) :\n '''exponential preserving sign'''\n return x.sign() * (torch.exp( x.abs() ) - 1.)" }, { "identifier": "write_forecast", "path": "atmorep/datasets/data_writer.py", "snippet": "def write_forecast( model_id, epoch, batch_idx, levels, sources, sources_coords,\n targets, targets_coords,\n preds, ensembles,\n zarr_store_type = 'ZipStore' ) :\n ''' \n sources : num_fields x [field name , data]\n targets :\n preds, ensemble share coords with targets\n '''\n\n fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch:05d}' + '_{}.zarr'\n\n zarr_store = getattr( zarr, zarr_store_type)\n\n store_source = zarr_store( fname.format( 'source'))\n exp_source = zarr.group(store=store_source)\n for fidx, field in enumerate(sources) :\n ds_field = exp_source.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=sources_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=sources_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=sources_coords[2][bidx])\n store_source.close()\n\n store_target = zarr_store( fname.format( 'target'))\n exp_target = zarr.group(store=store_target)\n for fidx, field in enumerate(targets) :\n ds_field = exp_target.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = 
ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=targets_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=targets_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=targets_coords[2][bidx])\n store_target.close()\n\n store_pred = zarr_store( fname.format( 'pred'))\n exp_pred = zarr.group(store=store_pred)\n for fidx, field in enumerate(preds) :\n ds_field = exp_pred.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=targets_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=targets_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=targets_coords[2][bidx])\n store_pred.close()\n\n store_ens = zarr_store( fname.format( 'ens'))\n exp_ens = zarr.group(store=store_ens)\n for fidx, field in enumerate(ensembles) :\n ds_field = exp_ens.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=targets_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=targets_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=targets_coords[2][bidx])\n store_ens.close()" }, { "identifier": "write_BERT", "path": "atmorep/datasets/data_writer.py", "snippet": "def write_BERT( model_id, epoch, batch_idx, levels, sources, sources_coords,\n targets, targets_coords,\n preds, ensembles,\n zarr_store_type = 'ZipStore' ) :\n '''\n sources : num_fields x [field name , data]\n targets :\n preds, ensemble share coords with targets\n '''\n\n # fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch}.zarr'\n fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch:05d}' + '_{}.zarr'\n\n zarr_store = getattr( zarr, zarr_store_type)\n\n store_source = zarr_store( fname.format( 'source'))\n exp_source = zarr.group(store=store_source)\n for fidx, field in enumerate(sources) :\n ds_field = exp_source.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels[fidx])\n ds_batch_item.create_dataset( 'datetime', data=sources_coords[0][0][bidx])\n ds_batch_item.create_dataset( 'lat', data=sources_coords[1][0][bidx])\n ds_batch_item.create_dataset( 'lon', data=sources_coords[2][0][bidx])\n store_source.close()\n\n store_target = zarr_store( fname.format( 'target'))\n exp_target = zarr.group(store=store_target)\n for fidx, field in enumerate(targets) :\n if 0 == len(field[1]) : # skip fields that were not predicted\n continue\n batch_size = len(field[1][0])\n ds_field = exp_target.require_group( f'{field[0]}')\n for bidx in range( len(field[1][0])) :\n sample = batch_idx * batch_size + bidx\n 
ds_target_b = ds_field.create_group( f'sample={sample:05d}')\n for vidx in range(len(levels[fidx])) :\n ds_target_b_l = ds_target_b.require_group( f'ml={levels[fidx][vidx]}')\n ds_target_b_l.create_dataset( 'data', data=field[1][vidx][bidx])\n ds_target_b_l.create_dataset( 'ml', data=levels[fidx][vidx])\n ds_target_b_l.create_dataset( 'datetime', data=targets_coords[0][fidx][bidx][vidx])\n ds_target_b_l.create_dataset( 'lat', data=targets_coords[1][fidx][bidx][vidx])\n ds_target_b_l.create_dataset( 'lon', data=targets_coords[2][fidx][bidx][vidx])\n store_target.close()\n\n store_pred = zarr_store( fname.format( 'pred'))\n exp_pred = zarr.group(store=store_pred)\n for fidx, field in enumerate(preds) :\n if 0 == len(field[1]) : # skip fields that were not predicted\n continue\n batch_size = len(field[1][0])\n ds_pred = exp_pred.require_group( f'{field[0]}')\n for bidx in range( len(field[1][0])) :\n sample = batch_idx * batch_size + bidx\n ds_pred_b = ds_pred.create_group( f'sample={sample:05d}')\n for vidx in range(len(levels[fidx])) :\n ds_pred_b_l = ds_pred_b.create_group( f'ml={levels[fidx][vidx]}')\n ds_pred_b_l.create_dataset( 'data', data\n =field[1][vidx][bidx])\n ds_pred_b_l.create_dataset( 'ml', data=levels[fidx][vidx])\n ds_pred_b_l.create_dataset( 'datetime', data=targets_coords[0][fidx][bidx][vidx])\n ds_pred_b_l.create_dataset( 'lat', data=targets_coords[1][fidx][bidx][vidx])\n ds_pred_b_l.create_dataset( 'lon', data=targets_coords[2][fidx][bidx][vidx])\n store_pred.close()\n\n store_ens = zarr_store( fname.format( 'ens'))\n exp_ens = zarr.group(store=store_ens)\n for fidx, field in enumerate(ensembles) :\n if 0 == len(field[1]) : # skip fields that were not predicted\n continue\n batch_size = len(field[1][0])\n ds_ens = exp_ens.require_group( f'{field[0]}')\n for bidx in range( len(field[1][0])) :\n sample = batch_idx * batch_size + bidx\n ds_ens_b = ds_ens.create_group( f'sample={sample:05d}')\n for vidx in range(len(levels[fidx])) :\n ds_ens_b_l = ds_ens_b.create_group( f'ml={levels[fidx][vidx]}')\n ds_ens_b_l.create_dataset( 'data', data=field[1][vidx][bidx])\n ds_ens_b_l.create_dataset( 'ml', data=levels[fidx][vidx])\n ds_ens_b_l.create_dataset( 'datetime', data=targets_coords[0][fidx][bidx][vidx])\n ds_ens_b_l.create_dataset( 'lat', data=targets_coords[1][fidx][bidx][vidx])\n ds_ens_b_l.create_dataset( 'lon', data=targets_coords[2][fidx][bidx][vidx])\n store_ens.close()" }, { "identifier": "write_attention", "path": "atmorep/datasets/data_writer.py", "snippet": "def write_attention(model_id, epoch, batch_idx, levels, attn, attn_coords, zarr_store_type = 'ZipStore' ) :\n\n fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch:05d}' + '_{}.zarr'\n zarr_store = getattr( zarr, zarr_store_type)\n\n store_attn = zarr_store( fname.format( 'attention'))\n exp_attn = zarr.group(store=store_attn)\n\n for fidx, atts_f in enumerate(attn) :\n ds_field = exp_attn.require_group( f'{atts_f[0]}')\n ds_field_b = ds_field.require_group( f'batch={batch_idx:05d}')\n for lidx, atts_f_l in enumerate(atts_f[1]) : # layer in the network\n ds_f_l = ds_field_b.require_group( f'layer={lidx:05d}')\n ds_f_l.create_dataset( 'ml', data=levels[fidx])\n ds_f_l.create_dataset( 'datetime', data=attn_coords[0][fidx])\n ds_f_l.create_dataset( 'lat', data=attn_coords[1][fidx])\n ds_f_l.create_dataset( 'lon', data=attn_coords[2][fidx])\n ds_f_l_h = ds_f_l.require_group('heads')\n for hidx, atts_f_l_head in enumerate(atts_f_l) : # number of attention head\n if atts_f_l_head != None :\n 
ds_f_l_h.create_dataset(f'{hidx}', data=atts_f_l_head.numpy() )\n store_attn.close()" } ]
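Among the context snippets above, `CRPS` implements a closed-form CRPS of a Gaussian forecast (citing Eq. A2 in Rasp & Lerch, 2018), built on the unnormalized `Gaussian` helper. The stand-alone sketch below reproduces the formula exactly as written in the snippet, using `torch.special.erf`; the sample values are invented for illustration.

```python
import numpy as np
import torch

def gaussian_unnormalized(x, mu=0.0, std_dev=1.0):
    # Unnormalized Gaussian with maximum one, as in the snippet above.
    return torch.exp(-0.5 * (x - mu) * (x - mu) / (std_dev * std_dev))

def crps_gaussian(y, mu, std_dev):
    # CRPS of a Gaussian predictive distribution, written exactly as in
    # the snippet above (Rasp & Lerch 2018, Eq. A2).
    c1 = np.sqrt(1.0 / np.pi)
    t1 = 2.0 * torch.special.erf((y - mu) / std_dev) - 1.0
    t2 = 2.0 * gaussian_unnormalized((y - mu) / std_dev)
    return std_dev * ((y - mu) / std_dev * t1 + t2 - c1)

# Illustrative values: observation y, predicted mean mu, predicted spread std_dev.
y = torch.tensor([0.3, -1.2, 2.0])
mu = torch.tensor([0.0, -1.0, 1.5])
std_dev = torch.tensor([0.5, 0.8, 1.0])
print(crps_gaussian(y, mu, std_dev))
```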
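The `positional_encoding_harmonic` snippet first decomposes the flat token index into (vertical level, time, latitude, longitude) indices before building the sin/cos encoding. A short sketch of that index decomposition, with a made-up token grid, is shown below; it mirrors the snippet's arithmetic rather than proposing a canonical formulation.

```python
import numpy as np
import torch

# Hypothetical token layout: num_levels vertical levels, each carrying
# num_tokens = (time, lat, lon) tokens, flattened into one sequence.
num_levels = 2
num_tokens = (3, 4, 5)

idx = torch.arange(num_levels * int(np.prod(num_tokens)))
num_tokens_t_lat_lon = int(np.prod(num_tokens))
num_tokens_lat_lon = num_tokens[1] * num_tokens[2]

# Same integer decomposition as in the snippet above.
idxs_v = (idx / num_tokens_t_lat_lon).int()          # vertical level index
temp = torch.remainder(idx, num_tokens_t_lat_lon)
idxs_t = (temp / num_tokens_lat_lon).int()           # time index
temp = torch.remainder(idx, num_tokens_lat_lon)
idxs_lat = (temp / num_tokens[1]).int()              # latitude index (divisor as written in the snippet)
idxs_lon = torch.remainder(temp, num_tokens[2])      # longitude index

print(idxs_v[:8].tolist(), idxs_t[:8].tolist(), idxs_lat[:8].tolist(), idxs_lon[:8].tolist())
```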
import torch import torchinfo import numpy as np import code import os import datetime import functools import pandas as pd import wandb import torch.distributed as dist import torch.utils.data.distributed import atmorep.config.config as config import atmorep.utils.token_infos_transformations as token_infos_transformations import atmorep.utils.utils as utils from pathlib import Path from typing import TypeVar from torch.distributed.optim import ZeroRedundancyOptimizer from atmorep.core.atmorep_model import AtmoRep from atmorep.core.atmorep_model import AtmoRepData from atmorep.training.bert import prepare_batch_BERT_multifield from atmorep.transformer.transformer_base import positional_encoding_harmonic from atmorep.utils.utils import shape_to_str from atmorep.utils.utils import relMSELoss from atmorep.utils.utils import Gaussian from atmorep.utils.utils import CRPS from atmorep.utils.utils import NetMode from atmorep.utils.utils import sgn_exp from atmorep.datasets.data_writer import write_forecast, write_BERT, write_attention
15,091
# target etc are aliasing targets_b which simplifies bookkeeping below if is_predicted : target = [targets_b[vidx][bidx] for vidx in range(num_levels)] pred_mu = [preds_mu_b[vidx][bidx] for vidx in range(num_levels)] pred_ens = [preds_ens_b[vidx][bidx] for vidx in range(num_levels)] dates_masked_l, lats_masked_l, lons_masked_l = [], [], [] for vidx, _ in enumerate(field_info[2]) : normalizer = self.model.normalizer( fidx, vidx) y, m = dates[0].year, dates[0].month sources_b[bidx,vidx] = normalizer.denormalize( y, m, sources_b[bidx,vidx], [lats, lons]) if is_predicted : # TODO: make sure normalizer_local / normalizer_global is used in data_loader idx = tokens_masked_idx_list[fidx][vidx][bidx] tinfo_masked = tinfos[bidx,vidx].flatten( 0,2) tinfo_masked = tinfo_masked[idx] lad, lod = lat_d_h*res, lon_d_h*res lats_masked, lons_masked, dates_masked = [], [], [] for t in tinfo_masked : lats_masked.append( np.expand_dims( np.arange(t[4]-lad, t[4]+lad+0.001,res), 0)) lons_masked.append( np.expand_dims( np.arange(t[5]-lod, t[5]+lod+0.001,res), 0)) r = pd.date_range( start=utils.token_info_to_time(t), periods=token_size[0], freq='h') dates_masked.append( np.expand_dims(r.to_pydatetime().astype( 'datetime64[s]'), 0) ) lats_masked = np.concatenate( lats_masked, 0) lons_masked = np.remainder( np.concatenate( lons_masked, 0), 360.) dates_masked = np.concatenate( dates_masked, 0) for ii,(t,p,e,la,lo) in enumerate(zip( target[vidx], pred_mu[vidx], pred_ens[vidx], lats_masked, lons_masked)) : targets_b[vidx][bidx][ii] = normalizer.denormalize( y, m, t, [la, lo]) preds_mu_b[vidx][bidx][ii] = normalizer.denormalize( y, m, p, [la, lo]) preds_ens_b[vidx][bidx][ii] = normalizer.denormalize( y, m, e, [la, lo]) dates_masked_l += [ dates_masked ] lats_masked_l += [ [90.-lat for lat in lats_masked] ] lons_masked_l += [ lons_masked ] dates = dates.to_pydatetime().astype( 'datetime64[s]') coords_b += [ [dates, 90.-lats, lons, dates_masked_l, lats_masked_l, lons_masked_l] ] fn = field_info[0] sources_out.append( [fn, sources_b]) if is_predicted : targets_out.append([fn, [[t.numpy(force=True) for t in t_v] for t_v in targets_b]]) preds_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_mu_b]]) ensembles_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_ens_b]]) else : targets_out.append( [fn, []]) preds_out.append( [fn, []]) ensembles_out.append( [fn, []]) sources_dates_out.append( [c[0] for c in coords_b]) sources_lats_out.append( [c[1] for c in coords_b]) sources_lons_out.append( [c[2] for c in coords_b]) if is_predicted : targets_dates_out.append( [c[3] for c in coords_b]) targets_lats_out.append( [c[4] for c in coords_b]) targets_lons_out.append( [c[5] for c in coords_b]) else : targets_dates_out.append( [ ]) targets_lats_out.append( [ ]) targets_lons_out.append( [ ]) levels = [[np.array(l) for l in field[2]] for field in cf.fields] write_BERT( cf.wandb_id, epoch, batch_idx, levels, sources_out, [sources_dates_out, sources_lats_out, sources_lons_out], targets_out, [targets_dates_out, targets_lats_out, targets_lons_out], preds_out, ensembles_out ) def log_attention( self, epoch, bidx, log) : '''Hook for logging: output attention maps.''' cf = self.cf attention, token_infos = log attn_dates_out, attn_lats_out, attn_lons_out = [ ], [ ], [ ] attn_out = [] for fidx, field_info in enumerate(cf.fields) : # reconstruct coordinates is_predicted = fidx in self.fields_prediction_idx num_levels = len(field_info[2]) num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = 
int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) coords_b = [] for tinfo in tinfos : # use first vertical levels since a column is considered res = tinfo[0,0,0,0,-1] lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res*token_size[1]) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res*token_size[2]), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res*token_size[2]) lons = np.remainder( lons, 360.) dates = np.array([(utils.token_info_to_time(tinfo[0,t,0,0,:3])) for t in range(tinfo.shape[1])], dtype='datetime64[s]') coords_b += [ [dates, lats, lons] ] if is_predicted: attn_out.append([field_info[0], attention[fidx]]) attn_dates_out.append([c[0] for c in coords_b]) attn_lats_out.append( [c[1] for c in coords_b]) attn_lons_out.append( [c[2] for c in coords_b]) else: attn_dates_out.append( [] ) attn_lats_out.append( [] ) attn_lons_out.append( [] ) levels = [[np.array(l) for l in field[2]] for field in cf.fields]
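In the logging code above, the lat/lon coordinates of each sample are reconstructed from the token info (token-center coordinates plus resolution), with longitudes wrapped back into [0, 360) when a neighbourhood crosses the dateline. The simplified sketch below illustrates that reconstruction for a single neighbourhood; the resolution, token size, and center coordinates are invented for illustration.

```python
import numpy as np

# Hypothetical token geometry: grid resolution in degrees and token size in grid points.
res = 0.25
token_size = (12, 6, 12)        # (time, lat, lon) points per token
lat_d_h = int(np.floor(token_size[1] / 2.0))
lon_d_h = int(np.floor(token_size[2] / 2.0))

# Invented token-info centers: first/last token centers along lat and lon.
lat_first, lat_last = 48.0, 54.0
lon_first, lon_last = 358.5, 4.5   # neighbourhood crossing 360 degrees

lats = np.arange(lat_first - lat_d_h * res,
                 lat_last + lat_d_h * res + 0.001,
                 res * token_size[1])

if lon_last < lon_first:
    # Dateline crossing: build a monotone range past 360, then wrap into [0, 360).
    lons = np.remainder(np.arange(lon_first - lon_d_h * res,
                                  360.0 + lon_last + lon_d_h * res + 0.001,
                                  res * token_size[2]), 360.0)
else:
    lons = np.remainder(np.arange(lon_first - lon_d_h * res,
                                  lon_last + lon_d_h * res + 0.001,
                                  res * token_size[2]), 360.0)

print(lats, lons)
```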
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class Trainer_Base() : def __init__( self, cf, devices ) : self.cf = cf self.devices = devices self.device_in = devices[0] self.device_out = devices[-1] self.fields_prediction_idx = [] self.loss_weights = torch.zeros( len(cf.fields_prediction) ) for ifield, field in enumerate(cf.fields_prediction) : self.loss_weights[ifield] = self.cf.fields_prediction[ifield][1] for idx, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.fields_prediction_idx.append( idx) break self.loss_weights = self.loss_weights.to( self.device_out) self.MSELoss = torch.nn.MSELoss() # transformation for token infos if hasattr( cf, 'token_infos_transformation') : self.tok_infos_trans = getattr( token_infos_transformations, cf.token_infos_transformation) else : self.tok_infos_trans = getattr( token_infos_transformations, 'identity') if 0 == cf.par_rank : directory = Path( config.path_results, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) directory = Path( config.path_models, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) ################################################### def create( self, load_embeds=True) : net = AtmoRep( self.cf) self.model = AtmoRepData( net) self.model.create( self.pre_batch, self.devices, load_embeds) # TODO: pass the properly to model / net self.model.net.encoder_to_decoder = self.encoder_to_decoder self.model.net.decoder_to_tail = self.decoder_to_tail return self ################################################### @classmethod def load( Typename, cf, model_id, epoch, devices) : trainer = Typename( cf, devices).create( load_embeds=False) trainer.model.net = trainer.model.net.load( model_id, devices, cf, epoch) # TODO: pass the properly to model / net trainer.model.net.encoder_to_decoder = trainer.encoder_to_decoder trainer.model.net.decoder_to_tail = trainer.decoder_to_tail str = 'Loaded model id = {}{}.'.format( model_id, f' at epoch = {epoch}' if epoch> -2 else '') print( str) return trainer ################################################### def save( self, epoch) : self.model.net.save( epoch) ################################################### def get_learn_rates( self) : cf = self.cf size_padding = 5 learn_rates = np.zeros( cf.num_epochs + size_padding) learn_rates[:cf.lr_start_epochs] = np.linspace( cf.lr_start, cf.lr_max, num = cf.lr_start_epochs) lr = learn_rates[cf.lr_start_epochs-1] ic = 0 for epoch in range( cf.lr_start_epochs, cf.num_epochs + size_padding) : lr = max( lr / cf.lr_decay_rate, cf.lr_min) learn_rates[epoch] = lr if ic > 9999 : # sanity check assert "Maximum number of epochs exceeded." 
return learn_rates ################################################### def run( self, epoch = -1) : cf = self.cf model = self.model learn_rates = self.get_learn_rates() if cf.with_ddp : self.model_ddp = torch.nn.parallel.DistributedDataParallel( model, static_graph=True) if not cf.optimizer_zero : self.optimizer = torch.optim.AdamW( self.model_ddp.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) else : self.optimizer = ZeroRedundancyOptimizer(self.model_ddp.parameters(), optimizer_class=torch.optim.AdamW, lr=cf.lr_start ) else : self.optimizer = torch.optim.AdamW( self.model.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) if 0 == cf.par_rank : # print( self.model.net) model_parameters = filter(lambda p: p.requires_grad, self.model_ddp.parameters()) num_params = sum([np.prod(p.size()) for p in model_parameters]) print( f'Number of trainable parameters: {num_params:,}') # test at the beginning as reference self.model.load_data( NetMode.test, batch_size=cf.batch_size_test) if cf.test_initial : cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() test_loss = np.array( [cur_test_loss]) else : # generic value based on data normalization test_loss = np.array( [1.0]) epoch += 1 batch_size = cf.batch_size_start - cf.batch_size_delta if cf.profile : lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr self.model.load_data( NetMode.train, batch_size = cf.batch_size_max) self.profile() # training loop while True : if epoch >= cf.num_epochs : break lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr batch_size = min( cf.batch_size_max, batch_size + cf.batch_size_delta) tstr = datetime.datetime.now().strftime("%H:%M:%S") print( '{} : {} :: batch_size = {}, lr = {}'.format( epoch, tstr, batch_size, lr) ) self.model.load_data( NetMode.train, batch_size = batch_size) self.train( epoch) if cf.with_wandb and 0 == cf.par_rank : self.save( epoch) cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() # self.validate( epoch, 'forecast') # save model if cur_test_loss < test_loss.min() : self.save( -2) test_loss = np.append( test_loss, [cur_test_loss]) epoch += 1 tstr = datetime.datetime.now().strftime("%H:%M:%S") print( 'Finished training at {} with test loss = {}.'.format( tstr, test_loss[-1]) ) # save final network if cf.with_wandb and 0 == cf.par_rank : self.save( -2) ################################################### def train( self, epoch): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() loss_total = [[] for i in range(len(cf.losses)) ] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] mse_loss_total = [] grad_loss_total = [] ctr = 0 for batch_idx in range( model.len( NetMode.train)) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() loss.backward() self.optimizer.step() [loss_total[idx].append( losses[key]) for idx, key in enumerate(losses)] mse_loss_total.append( mse_loss.detach().cpu() ) grad_loss_total.append( loss.detach().cpu() ) [std_dev_total[idx].append( pred[1].detach().cpu()) for idx, pred in enumerate(preds)] # logging if int((batch_idx * cf.batch_size_max) / 4) > ctr : # wandb logging if cf.with_wandb and (0 == cf.par_rank) : loss_dict = { "training loss": torch.mean( torch.tensor( mse_loss_total)), "gradient loss": torch.mean( torch.tensor( grad_loss_total)) } # log individual loss terms for 
individual fields for idx, cur_loss in enumerate(loss_total) : loss_name = self.cf.losses[idx] lt = torch.tensor(cur_loss) for i, field in enumerate(cf.fields_prediction) : idx_name = loss_name + ', ' + field[0] idx_std_name = 'stddev, ' + field[0] loss_dict[idx_name] = torch.mean( lt[:,i]).cpu().detach() loss_dict[idx_std_name] = torch.mean(torch.cat(std_dev_total[i],0)).cpu().detach() wandb.log( loss_dict ) # console output print('train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:1.5f} : {:1.5f} :: {:1.5f}'.format( epoch, batch_idx, model.len( NetMode.train), 100. * batch_idx/model.len(NetMode.train), torch.mean( torch.tensor( grad_loss_total)), torch.mean(torch.tensor(mse_loss_total)), torch.mean( preds[0][1]) ), flush=True) # save model (use -2 as epoch to indicate latest, stored without epoch specification) # self.save( -2) # reset loss_total = [[] for i in range(len(cf.losses)) ] mse_loss_total = [] grad_loss_total = [] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] ctr += 1 # save gradients if cf.save_grads and cf.with_wandb and (0 == cf.par_rank) : dir_name = './grads/id{}'.format( cf.wandb_id) if not os.path.exists(dir_name): os.makedirs(dir_name) rmsprop_ws = [] for k in range( len(self.optimizer.state_dict()['state']) ) : rmsprop_ws.append(self.optimizer.state_dict()['state'][k]['exp_avg_sq'].mean().unsqueeze(0)) rmsprop_ws = torch.cat( rmsprop_ws) fname = '{}/{}_epoch{}_rmsprop.npy'.format( dir_name, cf.wandb_id, epoch) np.save( fname, rmsprop_ws.cpu().detach().numpy() ) idx = 0 for name, param in self.model.named_parameters(): if param.requires_grad : fname = '{}/{}_epoch{}_{:05d}_{}_grad.npy'.format( dir_name, cf.wandb_id, epoch, idx,name) np.save( fname, param.grad.cpu().detach().numpy() ) idx += 1 # clean memory self.optimizer.zero_grad() del batch_data, loss, loss_total, mse_loss_total, grad_loss_total, std_dev_total ################################################### def profile( self): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() # See https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html # for details on how to load and analyse report # https://pytorch.org/blog/trace-analysis-for-masses/ # do for all par_ranks to avoid that they run out of sync print( '---------------------------------') print( 'Profiling:') pname = './logs/profile_par_rank' + str(cf.par_rank) + '_' + cf.wandb_id + '/profile' with torch.profiler.profile( activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2), on_trace_ready=torch.profiler.tensorboard_trace_handler(pname), profile_memory=True, record_shapes=True, with_stack=True) as prof: for batch_idx in range( 2 * (1+1+3) ) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() # loss.backward() # self.optimizer.step() prof.step() print( 'Profiling finished.') print( '---------------------------------') ################################################### def validate( self, epoch, BERT_test_strategy = 'BERT'): cf = self.cf BERT_strategy_train = cf.BERT_strategy cf.BERT_strategy = BERT_test_strategy self.model.mode( NetMode.test) total_loss = 0. 
total_losses = torch.zeros( len(self.fields_prediction_idx) ) test_len = 0 self.mode_test = True # run in training mode offset = 0 if -1 == epoch and 0 == cf.par_rank : if 1 == cf.num_accs_per_task : # bug in torchinfo; fixed in v1.8.0 offset += 1 print( 'Network size:') batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) torchinfo.summary( self.model, input_data=[batch_data]) # run test set evaluation with torch.no_grad() : for it in range( self.model.len( NetMode.test) - offset) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : if type(batch_data[1][0][0]) is list : targets = [batch_data[1][i][0][0] for i in range( len(batch_data[1]))] else : targets = batch_data[1][0] # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) loss = torch.tensor( 0.) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] # hook for custom test loss self.test_loss( pred, target) # base line loss cur_loss = self.MSELoss( pred[0], target = target ).cpu().item() loss += cur_loss total_losses[ifield] += cur_loss ifield += 1 total_loss += loss test_len += 1 # store detailed results on current test set for book keeping if cf.par_rank < cf.log_test_num_ranks : log_preds = [[p.detach().clone().cpu() for p in pred] for pred in preds] self.log_validate( epoch, it, log_sources, log_preds) if cf.attention: self.log_attention( epoch, it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes total_loss /= test_len * len(self.cf.fields_prediction) total_losses /= test_len if cf.with_ddp : total_loss_cuda = total_loss.cuda() total_losses_cuda = total_losses.cuda() dist.all_reduce( total_loss_cuda, op=torch.distributed.ReduceOp.AVG ) dist.all_reduce( total_losses_cuda, op=torch.distributed.ReduceOp.AVG ) total_loss = total_loss_cuda.cpu() total_losses = total_losses_cuda.cpu() if 0 == cf.par_rank : print( 'validation loss for strategy={} at epoch {} : {}'.format( BERT_test_strategy, epoch, total_loss), flush=True) if cf.with_wandb and (0 == cf.par_rank) : loss_dict = {"val. loss {}".format(BERT_test_strategy) : total_loss} total_losses = total_losses.cpu().detach() for i, field in enumerate(cf.fields_prediction) : idx_name = 'val., {}, '.format(BERT_test_strategy) + field[0] loss_dict[idx_name] = total_losses[i] print( 'validation loss for {} : {}'.format( field[0], total_losses[i] )) wandb.log( loss_dict) batch_data = [] torch.cuda.empty_cache() cf.BERT_strategy = BERT_strategy_train self.mode_test = False return total_loss ################################################### def evaluate( self, data_idx = 0, log = True): cf = self.cf self.model.mode( NetMode.test) log_sources = [] test_len = 0 # evaluate loss = torch.tensor( 0.) 
with torch.no_grad() : for it in range( self.model.len( NetMode.test)) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : targets = [] for target_field in batch_data[1] : targets.append(torch.cat([target_vl[0].unsqueeze(1) for target_vl in target_field],1)) # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] cur_loss = self.MSELoss( pred[0], target = target ).cpu() loss += cur_loss ifield += 1 test_len += 1 # logging if cf.par_rank < cf.log_test_num_ranks : self.log_validate( data_idx, it, log_sources, preds) if cf.attention: self.log_attention( data_idx , it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes loss /= test_len * len(self.cf.fields_prediction) if cf.with_ddp : loss_cuda = loss.cuda() dist.all_reduce( loss_cuda, op=torch.distributed.ReduceOp.AVG ) loss = loss_cuda.cpu() if 0 == cf.par_rank : print( 'Loss {}'.format( loss)) ################################################### def test_loss( self, pred, target) : '''Hook for custom test loss''' pass ################################################### def loss( self, preds, batch_idx = 0) : # TODO: move implementations to individual files cf = self.cf mse_loss_total = torch.tensor( 0.,) losses = dict(zip(cf.losses,[[] for loss in cf.losses ])) for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] mse_loss = self.MSELoss( pred[0], target = target) mse_loss_total += mse_loss.cpu().detach() # MSE loss if 'mse' in self.cf.losses : losses['mse'].append( mse_loss) # MSE loss if 'mse_ensemble' in self.cf.losses : loss_en = torch.tensor( 0., device=target.device) for en in torch.transpose( pred[2], 1, 0) : loss_en += self.MSELoss( en, target = target) # losses['mse_ensemble'].append( 50. * loss_en / pred[2].shape[1]) losses['mse_ensemble'].append( loss_en / pred[2].shape[1]) # Generalized cross entroy loss for continuous distributions if 'stats' in self.cf.losses : stats_loss = Gaussian( target, pred[0], pred[1]) diff = (stats_loss-1.) 
# stats_loss = 0.01 * torch.mean( diff * diff) + torch.mean( torch.sqrt(torch.abs( pred[1])) ) stats_loss = torch.mean( diff * diff) + torch.mean( torch.sqrt( torch.abs( pred[1])) ) losses['stats'].append( stats_loss) # Generalized cross entroy loss for continuous distributions if 'stats_area' in self.cf.losses : diff = torch.abs( torch.special.erf( (target - pred[0]) / (pred[1] * pred[1])) ) stats_area = 0.2 * torch.mean( diff * diff) + torch.mean( torch.sqrt(torch.abs( pred[1])) ) losses['stats_area'].append( stats_area) # CRPS score if 'crps' in self.cf.losses : crps_loss = torch.mean( CRPS( target, pred[0], pred[1])) losses['crps'].append( crps_loss) loss = torch.tensor( 0., device=self.device_out) for key in losses : # print( 'LOSS : {} :: {}'.format( key, losses[key])) for ifield, val in enumerate(losses[key]) : loss += self.loss_weights[ifield] * val.to( self.device_out) loss /= len(self.cf.fields_prediction) * len( self.cf.losses) mse_loss = mse_loss_total / len(self.cf.fields_prediction) return loss, mse_loss, losses #################################################################################################### class Trainer_BERT( Trainer_Base) : ################################################### def __init__( self, cf, devices) : Trainer_Base.__init__( self, cf, devices) self.rng_seed = cf.rng_seed if not self.rng_seed : self.rng_seed = int(torch.randint( 100000000, (1,))) # TODO: generate only rngs that are needed ll = len(cf.fields) * 8 #len(cf.vertical_levels) if cf.BERT_fields_synced : self.rngs = [np.random.default_rng(self.rng_seed) for _ in range(ll)] else : self.rngs = [np.random.default_rng(self.rng_seed+i) for i in range(ll)] # batch preprocessing to be done in loader (mainly for performance reasons since it's # parallelized there) self.pre_batch = functools.partial( prepare_batch_BERT_multifield, self.cf, self.rngs, self.cf.fields, self.cf.BERT_strategy ) ################################################### def prepare_batch( self, xin) : '''Move data to device and some additional final preprocessing before model eval''' cf = self.cf devs = self.devices # unpack loader output # xin[0] since BERT does not have targets (sources, token_infos, targets, fields_tokens_masked_idx,fields_tokens_masked_idx_list) = xin[0] # network input batch_data = [ ( sources[i].to( devs[ cf.fields[i][1][3] ], non_blocking=True), self.tok_infos_trans(token_infos[i]).to( self.devices[0], non_blocking=True)) for i in range(len(sources)) ] # store token number since BERT selects sub-cube (optionally) self.num_tokens = [] for field_idx in range(len(batch_data)) : self.num_tokens.append( list(batch_data[field_idx][0].shape[2:5])) # target self.targets = [] for ifield in self.fields_prediction_idx : self.targets.append( targets[ifield].to( devs[cf.fields[ifield][1][3]], non_blocking=True )) # idxs of masked tokens tmi_out = [[] for _ in range(len(fields_tokens_masked_idx))] for i,tmi in enumerate(fields_tokens_masked_idx) : tmi_out[i] = [tmi_l.to( devs[cf.fields[i][1][3]], non_blocking=True) for tmi_l in tmi] self.tokens_masked_idx = tmi_out # idxs of masked tokens per batch entry self.fields_tokens_masked_idx_list = fields_tokens_masked_idx_list # learnable class token (cannot be done in the data loader since this is running in parallel) if cf.learnable_mask : for ifield, (source, _) in enumerate(batch_data) : source = torch.flatten( torch.flatten( torch.flatten( source, 1, 4), 2, 4), 0, 1) assert len(cf.fields[ifield][2]) == 1 tmidx = self.tokens_masked_idx[ifield][0] source[ tmidx ] = 
self.model.net.masks[ifield].to( source.device) return batch_data ################################################### def encoder_to_decoder( self, embeds_layers) : return ([embeds_layers[i][-1] for i in range(len(embeds_layers))] , embeds_layers ) ################################################### def decoder_to_tail( self, idx_pred, pred) : '''Positional encoding of masked tokens for tail network evaluation''' field_idx = self.fields_prediction_idx[idx_pred] dev = self.devices[ self.cf.fields[field_idx][1][3] ] target_idx = self.tokens_masked_idx[field_idx] assert len(target_idx) > 0, 'no masked tokens but target variable' # select "fixed" masked tokens for loss computation # recover vertical level dimension num_tokens = self.num_tokens[field_idx] num_vlevels = len(self.cf.fields[field_idx][2]) # flatten token dimensions: remove space-time separation pred = torch.flatten( pred, 2, 3).to( dev) # extract masked token level by level pred_masked = [] for lidx, level in enumerate(self.cf.fields[field_idx][2]) : # select masked tokens, flattened along batch dimension for easier indexing and processing pred_l = torch.flatten( pred[:,lidx], 0, 1) pred_masked_l = pred_l[ target_idx[lidx] ] target_idx_l = target_idx[lidx] # add positional encoding of masked tokens # # TODO: do we need the positional encoding? # compute space time indices of all tokens target_idxs_v = level * torch.ones( target_idx_l.shape[0], device=dev) num_tokens_space = num_tokens[1] * num_tokens[2] # remove offset introduced by linearization target_idx_l = torch.remainder( target_idx_l, np.prod(num_tokens)) target_idxs_t = (target_idx_l / num_tokens_space).int() temp = torch.remainder( target_idx_l, num_tokens_space) target_idxs_x = (temp / num_tokens[1]).int() target_idxs_y = torch.remainder( temp, num_tokens[2]) # apply harmonic positional encoding dim_embed = pred.shape[-1] pe = torch.zeros( pred_masked_l.shape[0], dim_embed, device=dev) xs = (2. * np.pi / dim_embed) * torch.arange( 0, dim_embed, 2, device=dev) pe[:, 0::2] = 0.5 * torch.sin( torch.outer( 8 * target_idxs_x, xs) ) \ + torch.sin( torch.outer( target_idxs_t, xs) ) pe[:, 1::2] = 0.5 * torch.cos( torch.outer( 8 * target_idxs_y, xs) ) \ + torch.cos( torch.outer( target_idxs_v, xs) ) # TODO: with or without final positional encoding? # pred_masked.append( pred_masked_l + pe) pred_masked.append( pred_masked_l) # flatten along level dimension, for loss evaluation we effectively have level, batch, ... 
# as ordering of dimensions pred_masked = torch.cat( pred_masked, 0) return pred_masked ################################################### def log_validate( self, epoch, bidx, log_sources, log_preds) : '''Hook for logging: output associated with concrete training strategy.''' if not hasattr( self.cf, 'wandb_id') : return if 'forecast' == self.cf.BERT_strategy : self.log_validate_forecast( epoch, bidx, log_sources, log_preds) elif 'BERT' == self.cf.BERT_strategy : self.log_validate_BERT( epoch, bidx, log_sources, log_preds) else : assert False ################################################### def log_validate_forecast( self, epoch, batch_idx, log_sources, log_preds) : '''Logging for BERT_strategy=forecast.''' cf = self.cf detok = utils.detokenize # TODO, TODO: for 6h forecast we need to iterate over predicted token slices # save source: remains identical so just save ones (sources, token_infos, targets, _, _) = log_sources sources_out, targets_out, preds_out, ensembles_out = [ ], [ ], [ ], [ ] # reconstruct geo-coords (identical for all fields) forecast_num_tokens = 1 if hasattr( cf, 'forecast_num_tokens') : forecast_num_tokens = cf.forecast_num_tokens num_tokens = cf.fields[0][3] token_size = cf.fields[0][4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) lats, lons = [ ], [ ] for tinfo in token_infos[0] : lat_min, lat_max = tinfo[0][4], tinfo[ num_tokens[1]*num_tokens[2]-1 ][4] lon_min, lon_max = tinfo[0][5], tinfo[ num_tokens[1]*num_tokens[2]-1 ][5] res = tinfo[0][-1] lat = torch.arange( lat_min - lat_d_h*res, lat_max + lat_d_h*res + 0.001, res) if lon_max < lon_min : lon = torch.arange( lon_min - lon_d_h*res, 360. + lon_max + lon_d_h*res + 0.001, res) else : lon = torch.arange( lon_min - lon_d_h*res, lon_max + lon_d_h*res + 0.001, res) lats.append( lat.numpy()) lons.append( torch.remainder( lon, 360.).numpy()) # check that last token (bottom right corner) has the expected coords # assert np.allclose( ) # extract dates for each token entry, constant for each batch and field dates_t = [] for b_token_infos in token_infos[0] : dates_t.append(utils.token_info_to_time(b_token_infos[0])-pd.Timedelta(hours=token_size[0]-1)) # TODO: check that last token matches first one # process input fields for fidx, field_info in enumerate(cf.fields) : # reshape from tokens to contiguous physical field num_levels = len(field_info[2]) source = detok( sources[fidx].cpu().detach().numpy()) # recover tokenized shape target = detok( targets[fidx].cpu().detach().numpy().reshape( [ -1, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ])) # TODO: check that geo-coords match to general ones that have been pre-determined for bidx in range(token_infos[fidx].shape[0]) : for vidx, _ in enumerate(field_info[2]) : denormalize = self.model.normalizer( fidx, vidx).denormalize date, coords = dates_t[bidx], [lats[bidx], lons[bidx]] source[bidx,vidx] = denormalize( date.year, date.month, source[bidx,vidx], coords) target[bidx,vidx] = denormalize( date.year, date.month, target[bidx,vidx], coords) # append sources_out.append( [field_info[0], source]) targets_out.append( [field_info[0], target]) # process predicted fields for fidx, fn in enumerate(cf.fields_prediction) : # field_info = cf.fields[ self.fields_prediction_idx[fidx] ] num_levels = len(field_info[2]) # predictions pred = log_preds[fidx][0].cpu().detach().numpy() pred = detok( pred.reshape( [ -1, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ])) # ensemble ensemble = 
log_preds[fidx][2].cpu().detach().numpy() ensemble = detok( ensemble.reshape( [ -1, cf.net_tail_num_nets, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ]) ) # denormalize for bidx in range(token_infos[fidx].shape[0]) : for vidx, vl in enumerate(field_info[2]) : denormalize = self.model.normalizer( self.fields_prediction_idx[fidx], vidx).denormalize date, coords = dates_t[bidx], [lats[bidx], lons[bidx]] pred[bidx,vidx] = denormalize( date.year, date.month, pred[bidx,vidx], coords) ensemble[bidx,:,vidx] = denormalize(date.year, date.month, ensemble[bidx,:,vidx], coords) # append preds_out.append( [fn[0], pred]) ensembles_out.append( [fn[0], ensemble]) # generate time range dates_sources, dates_targets = [ ], [ ] for bidx in range( source.shape[0]) : r = pd.date_range( start=dates_t[bidx], periods=source.shape[2], freq='h') dates_sources.append( r.to_pydatetime().astype( 'datetime64[s]') ) dates_targets.append( dates_sources[-1][ -forecast_num_tokens*token_size[0] : ] ) levels = np.array(cf.fields[0][2]) lats = [90.-lat for lat in lats] write_forecast( cf.wandb_id, epoch, batch_idx, levels, sources_out, [dates_sources, lats, lons], targets_out, [dates_targets, lats, lons], preds_out, ensembles_out ) ################################################### def log_validate_BERT( self, epoch, batch_idx, log_sources, log_preds) : '''Logging for BERT_strategy=BERT.''' cf = self.cf detok = utils.detokenize # save source: remains identical so just save ones (sources, token_infos, targets, tokens_masked_idx, tokens_masked_idx_list) = log_sources sources_out, targets_out, preds_out, ensembles_out = [ ], [ ], [ ], [ ] sources_dates_out, sources_lats_out, sources_lons_out = [ ], [ ], [ ] targets_dates_out, targets_lats_out, targets_lons_out = [ ], [ ], [ ] for fidx, field_info in enumerate(cf.fields) : # reconstruct coordinates is_predicted = fidx in self.fields_prediction_idx num_levels = len(field_info[2]) num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) res = tinfos[0,0,0,0,0][-1].item() batch_size = tinfos.shape[0] sources_b = detok( sources[fidx].numpy()) if is_predicted : # split according to levels lens_levels = [t.shape[0] for t in tokens_masked_idx[fidx]] targets_b = torch.split( targets[fidx], lens_levels) preds_mu_b = torch.split( log_preds[fidx][0], lens_levels) preds_ens_b = torch.split( log_preds[fidx][2], lens_levels) # split according to batch lens_batches = [ [bv.shape[0] for bv in b] for b in tokens_masked_idx_list[fidx] ] targets_b = [torch.split( targets_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_mu_b = [torch.split(preds_mu_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_ens_b =[torch.split(preds_ens_b[vidx],lens) for vidx,lens in enumerate(lens_batches)] # recover token shape targets_b = [[targets_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_mu_b = [[preds_mu_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_ens_b = [[preds_ens_b[vidx][bidx].reshape( [-1, cf.net_tail_num_nets, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] # for all batch items coords_b = [] for bidx, tinfo in enumerate(tinfos) : # use first vertical levels since a column is considered lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, 
tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res) lons = np.remainder( lons, 360.) # time stamp in token_infos is at start time so needs to be advanced by token_size[0]-1 s = utils.token_info_to_time( tinfo[0,0,0,0,:3] ) - pd.Timedelta(hours=token_size[0]-1) e = utils.token_info_to_time( tinfo[0,-1,0,0,:3] ) dates = pd.date_range( start=s, end=e, freq='h') # target etc are aliasing targets_b which simplifies bookkeeping below if is_predicted : target = [targets_b[vidx][bidx] for vidx in range(num_levels)] pred_mu = [preds_mu_b[vidx][bidx] for vidx in range(num_levels)] pred_ens = [preds_ens_b[vidx][bidx] for vidx in range(num_levels)] dates_masked_l, lats_masked_l, lons_masked_l = [], [], [] for vidx, _ in enumerate(field_info[2]) : normalizer = self.model.normalizer( fidx, vidx) y, m = dates[0].year, dates[0].month sources_b[bidx,vidx] = normalizer.denormalize( y, m, sources_b[bidx,vidx], [lats, lons]) if is_predicted : # TODO: make sure normalizer_local / normalizer_global is used in data_loader idx = tokens_masked_idx_list[fidx][vidx][bidx] tinfo_masked = tinfos[bidx,vidx].flatten( 0,2) tinfo_masked = tinfo_masked[idx] lad, lod = lat_d_h*res, lon_d_h*res lats_masked, lons_masked, dates_masked = [], [], [] for t in tinfo_masked : lats_masked.append( np.expand_dims( np.arange(t[4]-lad, t[4]+lad+0.001,res), 0)) lons_masked.append( np.expand_dims( np.arange(t[5]-lod, t[5]+lod+0.001,res), 0)) r = pd.date_range( start=utils.token_info_to_time(t), periods=token_size[0], freq='h') dates_masked.append( np.expand_dims(r.to_pydatetime().astype( 'datetime64[s]'), 0) ) lats_masked = np.concatenate( lats_masked, 0) lons_masked = np.remainder( np.concatenate( lons_masked, 0), 360.) 
dates_masked = np.concatenate( dates_masked, 0) for ii,(t,p,e,la,lo) in enumerate(zip( target[vidx], pred_mu[vidx], pred_ens[vidx], lats_masked, lons_masked)) : targets_b[vidx][bidx][ii] = normalizer.denormalize( y, m, t, [la, lo]) preds_mu_b[vidx][bidx][ii] = normalizer.denormalize( y, m, p, [la, lo]) preds_ens_b[vidx][bidx][ii] = normalizer.denormalize( y, m, e, [la, lo]) dates_masked_l += [ dates_masked ] lats_masked_l += [ [90.-lat for lat in lats_masked] ] lons_masked_l += [ lons_masked ] dates = dates.to_pydatetime().astype( 'datetime64[s]') coords_b += [ [dates, 90.-lats, lons, dates_masked_l, lats_masked_l, lons_masked_l] ] fn = field_info[0] sources_out.append( [fn, sources_b]) if is_predicted : targets_out.append([fn, [[t.numpy(force=True) for t in t_v] for t_v in targets_b]]) preds_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_mu_b]]) ensembles_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_ens_b]]) else : targets_out.append( [fn, []]) preds_out.append( [fn, []]) ensembles_out.append( [fn, []]) sources_dates_out.append( [c[0] for c in coords_b]) sources_lats_out.append( [c[1] for c in coords_b]) sources_lons_out.append( [c[2] for c in coords_b]) if is_predicted : targets_dates_out.append( [c[3] for c in coords_b]) targets_lats_out.append( [c[4] for c in coords_b]) targets_lons_out.append( [c[5] for c in coords_b]) else : targets_dates_out.append( [ ]) targets_lats_out.append( [ ]) targets_lons_out.append( [ ]) levels = [[np.array(l) for l in field[2]] for field in cf.fields] write_BERT( cf.wandb_id, epoch, batch_idx, levels, sources_out, [sources_dates_out, sources_lats_out, sources_lons_out], targets_out, [targets_dates_out, targets_lats_out, targets_lons_out], preds_out, ensembles_out ) def log_attention( self, epoch, bidx, log) : '''Hook for logging: output attention maps.''' cf = self.cf attention, token_infos = log attn_dates_out, attn_lats_out, attn_lons_out = [ ], [ ], [ ] attn_out = [] for fidx, field_info in enumerate(cf.fields) : # reconstruct coordinates is_predicted = fidx in self.fields_prediction_idx num_levels = len(field_info[2]) num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) coords_b = [] for tinfo in tinfos : # use first vertical levels since a column is considered res = tinfo[0,0,0,0,-1] lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res*token_size[1]) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res*token_size[2]), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res*token_size[2]) lons = np.remainder( lons, 360.) dates = np.array([(utils.token_info_to_time(tinfo[0,t,0,0,:3])) for t in range(tinfo.shape[1])], dtype='datetime64[s]') coords_b += [ [dates, lats, lons] ] if is_predicted: attn_out.append([field_info[0], attention[fidx]]) attn_dates_out.append([c[0] for c in coords_b]) attn_lats_out.append( [c[1] for c in coords_b]) attn_lons_out.append( [c[2] for c in coords_b]) else: attn_dates_out.append( [] ) attn_lats_out.append( [] ) attn_lons_out.append( [] ) levels = [[np.array(l) for l in field[2]] for field in cf.fields]
write_attention(cf.wandb_id, epoch,
12
2023-10-09 19:42:46+00:00
24k
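The `loss` method in the Trainer code of the record above mixes deterministic and probabilistic terms; when `'crps'` appears in `cf.losses` it calls a `CRPS( target, pred[0], pred[1])` helper that is imported outside this excerpt and not shown here. Purely as an illustrative sketch (assuming `pred[0]` is a predicted mean and `pred[1]` a predicted standard deviation, which this excerpt does not confirm), a closed-form CRPS for a Gaussian predictive distribution could look like this:

import math
import torch

def gaussian_crps(target, mu, sigma, eps=1e-6):
    """Closed-form CRPS of a Gaussian N(mu, sigma^2) evaluated at `target`.

    Illustrative sketch only; the real CRPS helper used by the snippet above
    is defined elsewhere. Tensors broadcast against each other; the result is
    per-element CRPS (lower is better), which the caller can reduce with mean().
    """
    sigma = sigma.abs().clamp_min(eps)            # guard against zero / negative scale
    z = (target - mu) / sigma                     # standardized error
    pdf = torch.exp(-0.5 * z * z) / math.sqrt(2.0 * math.pi)   # standard normal pdf at z
    cdf = 0.5 * (1.0 + torch.erf(z / math.sqrt(2.0)))          # standard normal cdf at z
    return sigma * (z * (2.0 * cdf - 1.0) + 2.0 * pdf - 1.0 / math.sqrt(math.pi))

# usage mirroring the call pattern in the snippet:
# crps_loss = torch.mean(gaussian_crps(target, pred[0], pred[1]))

A closed-form expression like this avoids sampling the ensemble head, which is one reason analytic CRPS variants are often preferred for per-batch training losses alongside MSE-style terms.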
MachinePerceptionLab/Attentive_DFPrior
src/DF_Prior.py
[ { "identifier": "config", "path": "src/config.py", "snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg):" }, { "identifier": "Mapper", "path": "src/Mapper.py", "snippet": "class Mapper(object):\n \"\"\"\n Mapper thread. \n\n \"\"\"\n\n def __init__(self, cfg, args, slam\n ):\n\n self.cfg = cfg\n self.args = args\n\n self.idx = slam.idx\n self.c = slam.shared_c\n self.bound = slam.bound\n self.logger = slam.logger\n self.mesher = slam.mesher\n self.output = slam.output\n self.verbose = slam.verbose\n self.renderer = slam.renderer\n self.low_gpu_mem = slam.low_gpu_mem\n self.mapping_idx = slam.mapping_idx\n self.mapping_cnt = slam.mapping_cnt\n self.decoders = slam.shared_decoders\n self.estimate_c2w_list = slam.estimate_c2w_list\n self.mapping_first_frame = slam.mapping_first_frame\n self.scene_id = slam.scene_id\n with torch.no_grad():\n self.tsdf_volume_shared = slam.tsdf_volume_shared\n self.tsdf_bnds = slam.tsdf_bnds\n \n \n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n self.sync_method = cfg['sync_method']\n\n self.device = cfg['mapping']['device']\n self.fix_high = cfg['mapping']['fix_high']\n self.eval_rec = cfg['meshing']['eval_rec']\n \n \n self.mesh_freq = cfg['mapping']['mesh_freq']\n self.ckpt_freq = cfg['mapping']['ckpt_freq']\n self.fix_color = cfg['mapping']['fix_color']\n self.mapping_pixels = cfg['mapping']['pixels']\n self.num_joint_iters = cfg['mapping']['iters']\n self.clean_mesh = cfg['meshing']['clean_mesh']\n self.every_frame = cfg['mapping']['every_frame']\n self.color_refine = cfg['mapping']['color_refine']\n self.w_color_loss = cfg['mapping']['w_color_loss']\n self.keyframe_every = cfg['mapping']['keyframe_every']\n self.high_iter_ratio = cfg['mapping']['high_iter_ratio']\n self.low_iter_ratio = cfg['mapping']['low_iter_ratio']\n self.mapping_window_size = cfg['mapping']['mapping_window_size']\n self.no_vis_on_first_frame = cfg['mapping']['no_vis_on_first_frame']\n self.no_log_on_first_frame = cfg['mapping']['no_log_on_first_frame']\n self.no_mesh_on_first_frame = cfg['mapping']['no_mesh_on_first_frame']\n self.frustum_feature_selection = cfg['mapping']['frustum_feature_selection']\n self.keyframe_selection_method = cfg['mapping']['keyframe_selection_method']\n self.save_selected_keyframes_info = cfg['mapping']['save_selected_keyframes_info']\n if self.save_selected_keyframes_info:\n self.selected_keyframes = {}\n\n\n self.keyframe_dict = []\n self.keyframe_list = []\n self.frame_reader = get_dataset(\n cfg, args, self.scale, device=self.device)\n self.n_img = len(self.frame_reader)\n if 'Demo' not in self.output: # disable this visualization in demo\n self.visualizer = Visualizer(freq=cfg['mapping']['vis_freq'], inside_freq=cfg['mapping']['vis_inside_freq'],\n vis_dir=os.path.join(self.output, 'mapping_vis'), renderer=self.renderer,\n verbose=self.verbose, device=self.device)\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n def get_mask_from_c2w(self, c2w, key, val_shape, depth_np):\n \"\"\"\n Frustum feature selection based on current camera pose and depth image.\n\n Args:\n c2w (tensor): camera pose of current frame.\n key (str): name of this feature grid.\n val_shape (tensor): shape of the grid.\n depth_np (numpy.array): depth image of current frame.\n\n Returns:\n mask (tensor): mask for selected optimizable feature.\n points (tensor): corresponding point coordinates.\n \"\"\"\n H, W, fx, fy, cx, cy, = self.H, self.W, 
self.fx, self.fy, self.cx, self.cy\n X, Y, Z = torch.meshgrid(torch.linspace(self.bound[0][0], self.bound[0][1], val_shape[2]),\n torch.linspace(self.bound[1][0], self.bound[1][1], val_shape[1]),\n torch.linspace(self.bound[2][0], self.bound[2][1], val_shape[0]))\n\n points = torch.stack([X, Y, Z], dim=-1).reshape(-1, 3)\n points_bak = points.clone()\n c2w = c2w.cpu().numpy()\n w2c = np.linalg.inv(c2w)\n ones = np.ones_like(points[:, 0]).reshape(-1, 1)\n homo_vertices = np.concatenate(\n [points, ones], axis=1).reshape(-1, 4, 1)\n cam_cord_homo = w2c@homo_vertices\n cam_cord = cam_cord_homo[:, :3]\n K = np.array([[fx, .0, cx], [.0, fy, cy], [.0, .0, 1.0]]).reshape(3, 3)\n cam_cord[:, 0] *= -1\n uv = K@cam_cord\n z = uv[:, -1:]+1e-5\n uv = uv[:, :2]/z\n uv = uv.astype(np.float32)\n\n remap_chunk = int(3e4)\n depths = []\n for i in range(0, uv.shape[0], remap_chunk):\n depths += [cv2.remap(depth_np,\n uv[i:i+remap_chunk, 0],\n uv[i:i+remap_chunk, 1],\n interpolation=cv2.INTER_LINEAR)[:, 0].reshape(-1, 1)]\n depths = np.concatenate(depths, axis=0)\n\n edge = 0\n mask = (uv[:, 0] < W-edge)*(uv[:, 0] > edge) * \\\n (uv[:, 1] < H-edge)*(uv[:, 1] > edge)\n\n # For ray with depth==0, fill it with maximum depth\n zero_mask = (depths == 0)\n depths[zero_mask] = np.max(depths)\n\n # depth test\n mask = mask & (0 <= -z[:, :, 0]) & (-z[:, :, 0] <= depths+0.5)\n mask = mask.reshape(-1)\n\n # add feature grid near cam center\n ray_o = c2w[:3, 3]\n ray_o = torch.from_numpy(ray_o).unsqueeze(0)\n\n dist = points_bak-ray_o\n dist = torch.sum(dist*dist, axis=1)\n mask2 = dist < 0.5*0.5\n mask2 = mask2.cpu().numpy()\n mask = mask | mask2\n\n points = points[mask]\n mask = mask.reshape(val_shape[2], val_shape[1], val_shape[0])\n return mask\n\n def keyframe_selection_overlap(self, gt_color, gt_depth, c2w, keyframe_dict, k, N_samples=16, pixels=100):\n \"\"\"\n Select overlapping keyframes to the current camera observation.\n\n Args:\n gt_color (tensor): ground truth color image of the current frame.\n gt_depth (tensor): ground truth depth image of the current frame.\n c2w (tensor): camera to world matrix (3*4 or 4*4 both fine).\n keyframe_dict (list): a list containing info for each keyframe.\n k (int): number of overlapping keyframes to select.\n N_samples (int, optional): number of samples/points per ray. Defaults to 16.\n pixels (int, optional): number of pixels to sparsely sample \n from the image of the current camera. 
Defaults to 100.\n Returns:\n selected_keyframe_list (list): list of selected keyframe id.\n \"\"\"\n device = self.device\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n\n rays_o, rays_d, gt_depth, gt_color = get_samples(\n 0, H, 0, W, pixels, H, W, fx, fy, cx, cy, c2w, gt_depth, gt_color, self.device)\n\n gt_depth = gt_depth.reshape(-1, 1)\n gt_depth = gt_depth.repeat(1, N_samples)\n t_vals = torch.linspace(0., 1., steps=N_samples).to(device)\n near = gt_depth*0.8\n far = gt_depth+0.5\n z_vals = near * (1.-t_vals) + far * (t_vals)\n pts = rays_o[..., None, :] + rays_d[..., None, :] * \\\n z_vals[..., :, None] # [N_rays, N_samples, 3]\n vertices = pts.reshape(-1, 3).cpu().numpy()\n list_keyframe = []\n for keyframeid, keyframe in enumerate(keyframe_dict):\n c2w = keyframe['est_c2w'].cpu().numpy()\n w2c = np.linalg.inv(c2w)\n ones = np.ones_like(vertices[:, 0]).reshape(-1, 1)\n homo_vertices = np.concatenate(\n [vertices, ones], axis=1).reshape(-1, 4, 1) # (N, 4)\n cam_cord_homo = w2c@homo_vertices # (N, 4, 1)=(4,4)*(N, 4, 1)\n cam_cord = cam_cord_homo[:, :3] # (N, 3, 1)\n K = np.array([[fx, .0, cx], [.0, fy, cy],\n [.0, .0, 1.0]]).reshape(3, 3)\n cam_cord[:, 0] *= -1\n uv = K@cam_cord\n z = uv[:, -1:]+1e-5\n uv = uv[:, :2]/z\n uv = uv.astype(np.float32)\n edge = 20\n mask = (uv[:, 0] < W-edge)*(uv[:, 0] > edge) * \\\n (uv[:, 1] < H-edge)*(uv[:, 1] > edge)\n mask = mask & (z[:, :, 0] < 0)\n mask = mask.reshape(-1)\n percent_inside = mask.sum()/uv.shape[0]\n list_keyframe.append(\n {'id': keyframeid, 'percent_inside': percent_inside})\n\n list_keyframe = sorted(\n list_keyframe, key=lambda i: i['percent_inside'], reverse=True)\n selected_keyframe_list = [dic['id']\n for dic in list_keyframe if dic['percent_inside'] > 0.00]\n selected_keyframe_list = list(np.random.permutation(\n np.array(selected_keyframe_list))[:k])\n return selected_keyframe_list\n \n def eval_points(self, p, decoders, tsdf_volume, tsdf_bnds, c=None, stage='color', device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): point coordinates.\n decoders (nn.module decoders): decoders.\n c (dicts, optional): feature grids. Defaults to None.\n stage (str, optional): query stage, corresponds to different levels. Defaults to 'color'.\n device (str, optional): device name to compute on. Defaults to 'cuda:0'.\n\n Returns:\n ret (tensor): occupancy (and color) value of input points.\n \"\"\"\n\n p_split = torch.split(p, 500)\n bound = self.bound\n rets = []\n for pi in p_split:\n # mask for points out of bound\n mask_x = (pi[:, 0] < bound[0][1]) & (pi[:, 0] > bound[0][0])\n mask_y = (pi[:, 1] < bound[1][1]) & (pi[:, 1] > bound[1][0])\n mask_z = (pi[:, 2] < bound[2][1]) & (pi[:, 2] > bound[2][0])\n mask = mask_x & mask_y & mask_z\n\n pi = pi.unsqueeze(0)\n ret, _ = decoders(pi, c_grid=c, tsdf_volume=tsdf_volume, tsdf_bnds=tsdf_bnds, stage=stage)\n \n ret = ret.squeeze(0)\n if len(ret.shape) == 1 and ret.shape[0] == 4:\n ret = ret.unsqueeze(0)\n\n ret[~mask, 3] = 100\n rets.append(ret)\n\n ret = torch.cat(rets, dim=0)\n return ret\n\n def optimize_map(self, num_joint_iters, lr_factor, idx, cur_gt_color, cur_gt_depth, gt_cur_c2w, keyframe_dict, keyframe_list, tsdf_volume, cur_c2w):\n \"\"\"\n Mapping iterations. 
Sample pixels from selected keyframes,\n then optimize scene representation.\n\n Args:\n num_joint_iters (int): number of mapping iterations.\n lr_factor (float): the factor to times on current lr.\n idx (int): the index of current frame\n cur_gt_color (tensor): gt_color image of the current camera.\n cur_gt_depth (tensor): gt_depth image of the current camera.\n gt_cur_c2w (tensor): groundtruth camera to world matrix corresponding to current frame.\n keyframe_dict (list): list of keyframes info dictionary.\n keyframe_list (list): list ofkeyframe index.\n tsdf_volume (tensor): tsdf volume.\n cur_c2w (tensor): the estimated camera to world matrix of current frame. \n\n Returns:\n return None\n \"\"\"\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n c = self.c\n cfg = self.cfg\n device = self.device\n tsdf_bnds = self.tsdf_bnds.to(device)\n\n if len(keyframe_dict) == 0:\n optimize_frame = []\n else:\n if self.keyframe_selection_method == 'global':\n num = self.mapping_window_size-2\n optimize_frame = random_select(len(self.keyframe_dict)-1, num)\n elif self.keyframe_selection_method == 'overlap':\n num = self.mapping_window_size-2\n optimize_frame = self.keyframe_selection_overlap(\n cur_gt_color, cur_gt_depth, cur_c2w, keyframe_dict[:-1], num)\n\n # add the last keyframe and the current frame(use -1 to denote)\n oldest_frame = None\n if len(keyframe_list) > 0:\n optimize_frame = optimize_frame + [len(keyframe_list)-1]\n oldest_frame = min(optimize_frame)\n optimize_frame += [-1]\n\n if self.save_selected_keyframes_info:\n keyframes_info = []\n for id, frame in enumerate(optimize_frame):\n if frame != -1:\n frame_idx = keyframe_list[frame]\n tmp_gt_c2w = keyframe_dict[frame]['gt_c2w']\n tmp_est_c2w = keyframe_dict[frame]['est_c2w']\n else:\n frame_idx = idx\n tmp_gt_c2w = gt_cur_c2w\n tmp_est_c2w = cur_c2w\n keyframes_info.append(\n {'idx': frame_idx, 'gt_c2w': tmp_gt_c2w, 'est_c2w': tmp_est_c2w})\n self.selected_keyframes[idx] = keyframes_info\n\n pixs_per_image = self.mapping_pixels//len(optimize_frame)\n\n mlp_para_list = []\n decoders_para_list = []\n low_grid_para = []\n high_grid_para = []\n color_grid_para = []\n gt_depth_np = cur_gt_depth.cpu().numpy()\n if True:\n if self.frustum_feature_selection:\n masked_c_grad = {}\n mask_c2w = cur_c2w\n for key, val in c.items():\n if not self.frustum_feature_selection:\n val = Variable(val.to(device), requires_grad=True)\n c[key] = val\n if key == 'grid_low':\n low_grid_para.append(val)\n elif key == 'grid_high':\n high_grid_para.append(val)\n elif key == 'grid_color':\n color_grid_para.append(val)\n\n else:\n mask = self.get_mask_from_c2w(\n mask_c2w, key, val.shape[2:], gt_depth_np)\n mask = torch.from_numpy(mask).permute(2, 1, 0).unsqueeze(\n 0).unsqueeze(0).repeat(1, val.shape[1], 1, 1, 1)\n val = val.to(device)\n # val_grad is the optimizable part, other parameters will be fixed\n val_grad = val[mask].clone()\n val_grad = Variable(val_grad.to(\n device), requires_grad=True)\n masked_c_grad[key] = val_grad\n masked_c_grad[key+'mask'] = mask\n if key == 'grid_low':\n low_grid_para.append(val_grad)\n elif key == 'grid_high':\n high_grid_para.append(val_grad)\n elif key == 'grid_color':\n color_grid_para.append(val_grad)\n\n\n if not self.fix_high:\n decoders_para_list += list(\n self.decoders.high_decoder.parameters())\n if not self.fix_color:\n decoders_para_list += list(\n self.decoders.color_decoder.parameters())\n mlp_para_list += list(\n self.decoders.mlp.parameters())\n \n\n optimizer = 
torch.optim.Adam([{'params': decoders_para_list, 'lr': 0},\n {'params': mlp_para_list, 'lr': 0},\n {'params': low_grid_para, 'lr': 0},\n {'params': high_grid_para, 'lr': 0},\n {'params': color_grid_para, 'lr': 0}])\n \n\n for joint_iter in range(num_joint_iters):\n if self.frustum_feature_selection:\n for key, val in c.items():\n val_grad = masked_c_grad[key]\n mask = masked_c_grad[key+'mask']\n val = val.to(device)\n val[mask] = val_grad\n c[key] = val\n\n if joint_iter <= int(num_joint_iters*self.low_iter_ratio):\n self.stage = 'low'\n elif joint_iter <= int(num_joint_iters*self.high_iter_ratio):\n self.stage = 'high'\n else:\n self.stage = 'color'\n\n optimizer.param_groups[0]['lr'] = cfg['mapping']['stage'][self.stage]['decoders_lr']*lr_factor\n optimizer.param_groups[1]['lr'] = cfg['mapping']['stage'][self.stage]['mlp_lr']*lr_factor\n optimizer.param_groups[2]['lr'] = cfg['mapping']['stage'][self.stage]['low_lr']*lr_factor\n optimizer.param_groups[3]['lr'] = cfg['mapping']['stage'][self.stage]['high_lr']*lr_factor\n optimizer.param_groups[4]['lr'] = cfg['mapping']['stage'][self.stage]['color_lr']*lr_factor\n \n if (not (idx == 0 and self.no_vis_on_first_frame)) and ('Demo' not in self.output):\n self.visualizer.vis(\n idx, joint_iter, cur_gt_depth, cur_gt_color, cur_c2w, self.c, self.decoders, tsdf_volume, tsdf_bnds)\n\n optimizer.zero_grad()\n batch_rays_d_list = []\n batch_rays_o_list = []\n batch_gt_depth_list = []\n batch_gt_color_list = []\n\n camera_tensor_id = 0\n for frame in optimize_frame:\n if frame != -1:\n gt_depth = keyframe_dict[frame]['depth'].to(device)\n gt_color = keyframe_dict[frame]['color'].to(device)\n c2w = keyframe_dict[frame]['est_c2w']\n\n else:\n gt_depth = cur_gt_depth.to(device)\n gt_color = cur_gt_color.to(device)\n c2w = cur_c2w\n\n batch_rays_o, batch_rays_d, batch_gt_depth, batch_gt_color = get_samples(\n 0, H, 0, W, pixs_per_image, H, W, fx, fy, cx, cy, c2w, gt_depth, gt_color, self.device)\n batch_rays_o_list.append(batch_rays_o.float())\n batch_rays_d_list.append(batch_rays_d.float())\n batch_gt_depth_list.append(batch_gt_depth.float())\n batch_gt_color_list.append(batch_gt_color.float())\n\n batch_rays_d = torch.cat(batch_rays_d_list)\n batch_rays_o = torch.cat(batch_rays_o_list)\n batch_gt_depth = torch.cat(batch_gt_depth_list)\n batch_gt_color = torch.cat(batch_gt_color_list)\n\n\n # should pre-filter those out of bounding box depth value\n with torch.no_grad():\n det_rays_o = batch_rays_o.clone().detach().unsqueeze(-1) # (N, 3, 1)\n det_rays_d = batch_rays_d.clone().detach().unsqueeze(-1) # (N, 3, 1)\n t = (self.bound.unsqueeze(0).to(\n device)-det_rays_o)/det_rays_d\n t, _ = torch.min(torch.max(t, dim=2)[0], dim=1)\n inside_mask = t >= batch_gt_depth\n batch_rays_d = batch_rays_d[inside_mask]\n batch_rays_o = batch_rays_o[inside_mask]\n batch_gt_depth = batch_gt_depth[inside_mask]\n batch_gt_color = batch_gt_color[inside_mask]\n\n ret = self.renderer.render_batch_ray(c, self.decoders, batch_rays_d,\n batch_rays_o, device, tsdf_volume, tsdf_bnds, self.stage,\n batch_gt_depth)\n depth, uncertainty, color, weight = ret\n\n\n depth_mask = (batch_gt_depth > 0)\n \n if joint_iter > int(num_joint_iters*self.low_iter_ratio) and joint_iter <= int(num_joint_iters*self.low_iter_ratio)+5 and idx <= 1:\n loss = torch.abs(\n batch_gt_depth[depth_mask]-depth[depth_mask]).sum() + torch.abs(weight-torch.ones(weight.shape).to(device)).sum()\n else:\n loss = torch.abs(\n batch_gt_depth[depth_mask]-depth[depth_mask]).sum()\n \n if self.stage == 'color':\n 
color_loss = torch.abs(batch_gt_color - color).sum()\n weighted_color_loss = self.w_color_loss*color_loss\n loss += weighted_color_loss\n\n loss.backward(retain_graph=False)\n optimizer.step()\n optimizer.zero_grad()\n\n # put selected and updated features back to the grid\n if self.frustum_feature_selection:\n for key, val in c.items():\n val_grad = masked_c_grad[key]\n mask = masked_c_grad[key+'mask']\n val = val.detach()\n val[mask] = val_grad.clone().detach()\n c[key] = val\n\n return None\n\n\n def run(self):\n cfg = self.cfg\n idx, gt_color, gt_depth, gt_c2w = self.frame_reader[0]\n\n self.estimate_c2w_list[0] = gt_c2w.cpu()\n init = True\n prev_idx = -1\n tsdf_volume = self.tsdf_volume_shared\n \n while (1):\n while True:\n idx = self.idx[0].clone()\n if idx == self.n_img-1:\n break\n if self.sync_method == 'strict':\n if idx % self.every_frame == 0 and idx != prev_idx:\n break\n elif self.sync_method == 'loose':\n if idx == 0 or idx >= prev_idx+self.every_frame//2:\n break\n elif self.sync_method == 'free':\n break\n time.sleep(0.1)\n prev_idx = idx\n\n if self.verbose:\n print(Fore.GREEN)\n prefix = ''\n print(prefix+\"Mapping Frame \", idx.item())\n print(Style.RESET_ALL)\n\n _, gt_color, gt_depth, gt_c2w = self.frame_reader[idx]\n\n # valid c2w\n valid_c2w = gt_c2w.clone().cpu().numpy()\n if not np.isfinite(valid_c2w).any():\n self.mapping_idx[0] = idx\n continue\n\n\n if not init:\n lr_factor = cfg['mapping']['lr_factor']\n num_joint_iters = cfg['mapping']['iters']\n\n # here provides a color refinement postprocess\n if idx == self.n_img-1 and self.color_refine:\n outer_joint_iters = 5\n self.mapping_window_size *= 2\n self.low_iter_ratio = 0.0\n self.high_iter_ratio = 0.0\n num_joint_iters *= 5\n self.fix_color = True\n self.frustum_feature_selection = False\n else:\n outer_joint_iters = 1\n \n\n else:\n outer_joint_iters = 1\n lr_factor = cfg['mapping']['lr_first_factor']\n num_joint_iters = cfg['mapping']['iters_first']\n\n cur_c2w = self.estimate_c2w_list[idx].to(self.device)\n num_joint_iters = num_joint_iters//outer_joint_iters\n \n for outer_joint_iter in range(outer_joint_iters):\n\n\n _ = self.optimize_map(num_joint_iters, lr_factor, idx, gt_color, gt_depth,\n gt_c2w, self.keyframe_dict, self.keyframe_list, tsdf_volume, cur_c2w=cur_c2w)\n \n\n # add new frame to keyframe set\n if outer_joint_iter == outer_joint_iters-1:\n if (idx % self.keyframe_every == 0 or (idx == self.n_img-2)) \\\n and (idx not in self.keyframe_list):\n self.keyframe_list.append(idx)\n self.keyframe_dict.append({'gt_c2w': gt_c2w.cpu(), 'idx': idx, 'color': gt_color.cpu(\n ), 'depth': gt_depth.cpu(), 'est_c2w': cur_c2w.clone()})\n\n if self.low_gpu_mem:\n torch.cuda.empty_cache()\n\n init = False\n # mapping of first frame is done, can begin tracking\n self.mapping_first_frame[0] = 1\n\n if True:\n if ((not (idx == 0 and self.no_log_on_first_frame)) and idx % self.ckpt_freq == 0) \\\n or idx == self.n_img-1 or (idx == 4640 and self.scene_id==50):\n self.logger.log(idx, self.keyframe_dict, self.keyframe_list,\n selected_keyframes=self.selected_keyframes\n if self.save_selected_keyframes_info else None)\n\n self.mapping_idx[0] = idx\n self.mapping_cnt[0] += 1\n\n if (idx % self.mesh_freq == 0) and (not (idx == 0 and self.no_mesh_on_first_frame)):\n mesh_out_file = f'{self.output}/mesh/{idx:05d}_mesh.ply'\n self.mesher.get_mesh(mesh_out_file, self.c, self.decoders, self.keyframe_dict, self.estimate_c2w_list,\n idx, tsdf_volume, self.device,\n clean_mesh=self.clean_mesh, 
get_mask_use_all_frames=False)\n\n if idx == self.n_img-1 or (idx == 4640 and self.scene_id==50):\n mesh_out_file = f'{self.output}/mesh/final_mesh.ply'\n self.mesher.get_mesh(mesh_out_file, self.c, self.decoders, self.keyframe_dict, self.estimate_c2w_list,\n idx, tsdf_volume, self.device,\n clean_mesh=self.clean_mesh, get_mask_use_all_frames=False)\n os.system(\n f\"cp {mesh_out_file} {self.output}/mesh/{idx:05d}_mesh.ply\")\n if self.eval_rec:\n mesh_out_file = f'{self.output}/mesh/final_mesh_eval_rec.ply'\n self.mesher.get_mesh(mesh_out_file, self.c, self.decoders, self.keyframe_dict,\n self.estimate_c2w_list, idx, tsdf_volume, self.device,\n clean_mesh=self.clean_mesh, get_mask_use_all_frames=True)\n break\n\n if idx == self.n_img-1 or (idx == 4640 and self.scene_id==50):\n break" }, { "identifier": "Tracker", "path": "src/Tracker.py", "snippet": "class Tracker(object):\n def __init__(self, cfg, args, slam\n ):\n self.cfg = cfg\n self.args = args\n\n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n self.sync_method = cfg['sync_method']\n\n self.idx = slam.idx\n self.bound = slam.bound\n self.mesher = slam.mesher\n self.output = slam.output\n self.verbose = slam.verbose\n self.shared_c = slam.shared_c\n self.renderer = slam.renderer\n self.gt_c2w_list = slam.gt_c2w_list\n self.low_gpu_mem = slam.low_gpu_mem\n self.mapping_idx = slam.mapping_idx\n self.mapping_cnt = slam.mapping_cnt\n self.shared_decoders = slam.shared_decoders\n self.estimate_c2w_list = slam.estimate_c2w_list\n with torch.no_grad():\n self.tsdf_volume_shared = slam.tsdf_volume_shared\n self.tsdf_bnds = slam.tsdf_bnds\n\n\n self.cam_lr = cfg['tracking']['lr']\n self.device = cfg['tracking']['device']\n self.num_cam_iters = cfg['tracking']['iters']\n self.gt_camera = cfg['tracking']['gt_camera']\n self.tracking_pixels = cfg['tracking']['pixels']\n self.seperate_LR = cfg['tracking']['seperate_LR']\n self.w_color_loss = cfg['tracking']['w_color_loss']\n self.ignore_edge_W = cfg['tracking']['ignore_edge_W']\n self.ignore_edge_H = cfg['tracking']['ignore_edge_H']\n self.handle_dynamic = cfg['tracking']['handle_dynamic']\n self.use_color_in_tracking = cfg['tracking']['use_color_in_tracking']\n self.const_speed_assumption = cfg['tracking']['const_speed_assumption']\n\n self.every_frame = cfg['mapping']['every_frame'] \n self.no_vis_on_first_frame = cfg['mapping']['no_vis_on_first_frame'] # ori mapping\n\n self.prev_mapping_idx = -1\n self.frame_reader = get_dataset(\n cfg, args, self.scale, device=self.device)\n self.n_img = len(self.frame_reader)\n self.frame_loader = DataLoader(\n self.frame_reader, batch_size=1, shuffle=False, num_workers=1)\n self.visualizer = Visualizer(freq=cfg['tracking']['vis_freq'], inside_freq=cfg['tracking']['vis_inside_freq'],\n vis_dir=os.path.join(self.output, 'vis' if 'Demo' in self.output else 'tracking_vis'),\n renderer=self.renderer, verbose=self.verbose, device=self.device)\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n def optimize_cam_in_batch(self, camera_tensor, gt_color, gt_depth, batch_size, optimizer, tsdf_volume):\n \"\"\"\n Do one iteration of camera iteration. 
Sample pixels, render depth/color, calculate loss and backpropagation.\n\n Args:\n camera_tensor (tensor): camera tensor.\n gt_color (tensor): ground truth color image of the current frame.\n gt_depth (tensor): ground truth depth image of the current frame.\n batch_size (int): batch size, number of sampling rays.\n optimizer (torch.optim): camera optimizer.\n tsdf_volume (tensor): tsdf volume\n\n Returns:\n loss (float): The value of loss.\n \"\"\"\n device = self.device\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n optimizer.zero_grad()\n c2w = get_camera_from_tensor(camera_tensor)\n tsdf_bnds = self.tsdf_bnds.to(device)\n Wedge = self.ignore_edge_W\n Hedge = self.ignore_edge_H\n batch_rays_o, batch_rays_d, batch_gt_depth, batch_gt_color = get_samples(\n Hedge, H-Hedge, Wedge, W-Wedge, batch_size, H, W, fx, fy, cx, cy, c2w, gt_depth, gt_color, self.device)\n \n # should pre-filter those out of bounding box depth value\n with torch.no_grad():\n det_rays_o = batch_rays_o.clone().detach().unsqueeze(-1) # (N, 3, 1)\n det_rays_d = batch_rays_d.clone().detach().unsqueeze(-1) # (N, 3, 1)\n t = (self.bound.unsqueeze(0).to(device)-det_rays_o)/det_rays_d\n t, _ = torch.min(torch.max(t, dim=2)[0], dim=1)\n inside_mask = t >= batch_gt_depth\n batch_rays_d = batch_rays_d[inside_mask]\n batch_rays_o = batch_rays_o[inside_mask]\n batch_gt_depth = batch_gt_depth[inside_mask]\n batch_gt_color = batch_gt_color[inside_mask]\n\n ret = self.renderer.render_batch_ray(\n self.c, self.decoders, batch_rays_d, batch_rays_o, self.device, tsdf_volume, tsdf_bnds, stage='color', gt_depth=batch_gt_depth) #color\n depth, uncertainty, color, _ = ret\n\n uncertainty = uncertainty.detach()\n if self.handle_dynamic:\n tmp = torch.abs(batch_gt_depth-depth)/torch.sqrt(uncertainty+1e-10)\n mask = (tmp < 10*tmp.median()) & (batch_gt_depth > 0)\n else:\n mask = batch_gt_depth > 0\n\n loss = (torch.abs(batch_gt_depth-depth) /\n torch.sqrt(uncertainty+1e-10))[mask].sum()\n\n if self.use_color_in_tracking:\n color_loss = torch.abs(\n batch_gt_color - color)[mask].sum()\n loss += self.w_color_loss*color_loss\n \n loss.backward(retain_graph=False)\n optimizer.step()\n optimizer.zero_grad()\n return loss.item()\n\n def update_para_from_mapping(self):\n \"\"\"\n Update the parameters of scene representation from the mapping thread.\n\n \"\"\"\n if self.mapping_idx[0] != self.prev_mapping_idx:\n if self.verbose:\n print('Tracking: update the parameters from mapping')\n self.decoders = copy.deepcopy(self.shared_decoders).to(self.device)\n for key, val in self.shared_c.items():\n val = val.clone().to(self.device)\n self.c[key] = val\n self.prev_mapping_idx = self.mapping_idx[0].clone()\n\n def run(self):\n device = self.device\n tsdf_volume = self.tsdf_volume_shared\n tsdf_bnds = self.tsdf_bnds.to(device)\n \n self.c = {}\n if self.verbose:\n pbar = self.frame_loader\n else:\n pbar = tqdm(self.frame_loader)\n\n for idx, gt_color, gt_depth, gt_c2w in pbar:\n if not self.verbose:\n pbar.set_description(f\"Tracking Frame {idx[0]}\")\n\n idx = idx[0]\n gt_depth = gt_depth[0]\n gt_color = gt_color[0]\n gt_c2w = gt_c2w[0]\n\n if self.sync_method == 'strict':\n # strictly mapping and then tracking\n # initiate mapping every self.every_frame frames\n if idx > 0 and (idx % self.every_frame == 1 or self.every_frame == 1):\n while self.mapping_idx[0] != idx-1:\n time.sleep(0.1)\n pre_c2w = self.estimate_c2w_list[idx-1].to(device)\n elif self.sync_method == 'loose':\n # mapping idx can be later than tracking idx is 
within the bound of\n # [-self.every_frame-self.every_frame//2, -self.every_frame+self.every_frame//2]\n while self.mapping_idx[0] < idx-self.every_frame-self.every_frame//2:\n time.sleep(0.1)\n elif self.sync_method == 'free':\n # pure parallel, if mesh/vis happens may cause inbalance\n pass\n\n self.update_para_from_mapping()\n\n if self.verbose:\n print(Fore.MAGENTA)\n print(\"Tracking Frame \", idx.item())\n print(Style.RESET_ALL)\n \n \n\n if idx == 0 or self.gt_camera:\n c2w = gt_c2w\n if not self.no_vis_on_first_frame:\n self.visualizer.vis(\n idx, 0, gt_depth, gt_color, c2w, self.c, self.decoders, tsdf_volume, tsdf_bnds)\n \n else:\n gt_camera_tensor = get_tensor_from_camera(gt_c2w)\n if self.const_speed_assumption and idx-2 >= 0:\n pre_c2w = pre_c2w.float()\n delta = [email protected]_c2w_list[idx-2].to(\n device).float().inverse()\n estimated_new_cam_c2w = delta@pre_c2w\n else:\n estimated_new_cam_c2w = pre_c2w\n\n camera_tensor = get_tensor_from_camera(\n estimated_new_cam_c2w.detach())\n if self.seperate_LR:\n camera_tensor = camera_tensor.to(device).detach()\n T = camera_tensor[-3:]\n quad = camera_tensor[:4]\n cam_para_list_quad = [quad]\n quad = Variable(quad, requires_grad=True)\n T = Variable(T, requires_grad=True)\n camera_tensor = torch.cat([quad, T], 0)\n cam_para_list_T = [T]\n cam_para_list_quad = [quad]\n optimizer_camera = torch.optim.Adam([{'params': cam_para_list_T, 'lr': self.cam_lr},\n {'params': cam_para_list_quad, 'lr': self.cam_lr*0.2}])\n else:\n camera_tensor = Variable(\n camera_tensor.to(device), requires_grad=True)\n cam_para_list = [camera_tensor]\n optimizer_camera = torch.optim.Adam(\n cam_para_list, lr=self.cam_lr)\n\n initial_loss_camera_tensor = torch.abs(\n gt_camera_tensor.to(device)-camera_tensor).mean().item()\n candidate_cam_tensor = None\n current_min_loss = 10000000000.\n\n \n\n for cam_iter in range(self.num_cam_iters):\n if self.seperate_LR:\n camera_tensor = torch.cat([quad, T], 0).to(self.device)\n\n self.visualizer.vis(\n idx, cam_iter, gt_depth, gt_color, camera_tensor, self.c, self.decoders, tsdf_volume, tsdf_bnds)\n\n loss = self.optimize_cam_in_batch(\n camera_tensor, gt_color, gt_depth, self.tracking_pixels, optimizer_camera, tsdf_volume)\n\n if cam_iter == 0:\n initial_loss = loss\n\n loss_camera_tensor = torch.abs(\n gt_camera_tensor.to(device)-camera_tensor).mean().item()\n if self.verbose:\n if cam_iter == self.num_cam_iters-1:\n print(\n f'Re-rendering loss: {initial_loss:.2f}->{loss:.2f} ' +\n f'camera tensor error: {initial_loss_camera_tensor:.4f}->{loss_camera_tensor:.4f}')\n if loss < current_min_loss:\n current_min_loss = loss\n candidate_cam_tensor = camera_tensor.clone().detach()\n bottom = torch.from_numpy(np.array([0, 0, 0, 1.]).reshape(\n [1, 4])).type(torch.float32).to(self.device)\n c2w = get_camera_from_tensor(\n candidate_cam_tensor.clone().detach())\n c2w = torch.cat([c2w, bottom], dim=0)\n\n \n self.estimate_c2w_list[idx] = c2w.clone().cpu()\n self.gt_c2w_list[idx] = gt_c2w.clone().cpu()\n pre_c2w = c2w.clone()\n self.idx[0] = idx\n if self.low_gpu_mem:\n torch.cuda.empty_cache()" }, { "identifier": "get_dataset", "path": "src/utils/datasets.py", "snippet": "def get_dataset(cfg, args, scale, device='cuda:0'):\n return dataset_dict[cfg['dataset']](cfg, args, scale, device=device)" }, { "identifier": "Logger", "path": "src/utils/Logger.py", "snippet": "class Logger(object):\n \"\"\"\n Save checkpoints to file.\n\n \"\"\"\n\n def __init__(self, cfg, args, slam\n ):\n self.verbose = slam.verbose\n self.ckptsdir = 
slam.ckptsdir\n self.shared_c = slam.shared_c\n self.gt_c2w_list = slam.gt_c2w_list\n self.shared_decoders = slam.shared_decoders\n self.estimate_c2w_list = slam.estimate_c2w_list\n self.tsdf_volume = slam.tsdf_volume_shared\n\n def log(self, idx, keyframe_dict, keyframe_list, selected_keyframes=None):\n path = os.path.join(self.ckptsdir, '{:05d}.tar'.format(idx))\n torch.save({\n 'c': self.shared_c,\n 'decoder_state_dict': self.shared_decoders.state_dict(),\n 'gt_c2w_list': self.gt_c2w_list,\n 'estimate_c2w_list': self.estimate_c2w_list,\n 'keyframe_list': keyframe_list,\n 'keyframe_dict': keyframe_dict, # to save keyframe_dict into ckpt, uncomment this line\n 'selected_keyframes': selected_keyframes,\n 'idx': idx,\n 'tsdf_volume': self.tsdf_volume,\n }, path, _use_new_zipfile_serialization=False)\n\n if self.verbose:\n print('Saved checkpoints at', path)" }, { "identifier": "Mesher", "path": "src/utils/Mesher.py", "snippet": "class Mesher(object):\n\n def __init__(self, cfg, args, slam, points_batch_size=500000, ray_batch_size=100000):\n \"\"\"\n Mesher class, given a scene representation, the mesher extracts the mesh from it.\n\n Args:\n cfg (dict): parsed config dict.\n args (class 'argparse.Namespace'): argparse arguments.\n slam (class DF_Prior): DF_Prior main class.\n points_batch_size (int): maximum points size for query in one batch. \n Used to alleviate GPU memeory usage. Defaults to 500000.\n ray_batch_size (int): maximum ray size for query in one batch. \n Used to alleviate GPU memeory usage. Defaults to 100000.\n \"\"\"\n self.points_batch_size = points_batch_size\n self.ray_batch_size = ray_batch_size\n self.renderer = slam.renderer\n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n \n self.resolution = cfg['meshing']['resolution']\n self.level_set = cfg['meshing']['level_set']\n self.clean_mesh_bound_scale = cfg['meshing']['clean_mesh_bound_scale']\n self.remove_small_geometry_threshold = cfg['meshing']['remove_small_geometry_threshold']\n self.color_mesh_extraction_method = cfg['meshing']['color_mesh_extraction_method']\n self.get_largest_components = cfg['meshing']['get_largest_components']\n self.depth_test = cfg['meshing']['depth_test']\n \n self.bound = slam.bound\n self.verbose = slam.verbose\n \n\n self.marching_cubes_bound = torch.from_numpy(\n np.array(cfg['mapping']['marching_cubes_bound']) * self.scale)\n\n self.frame_reader = get_dataset(cfg, args, self.scale, device='cpu')\n self.n_img = len(self.frame_reader)\n\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n self.sample_mode = 'bilinear'\n self.tsdf_bnds = slam.tsdf_bnds\n\n\n\n def point_masks(self, input_points, keyframe_dict, estimate_c2w_list,\n idx, device, get_mask_use_all_frames=False):\n \"\"\"\n Split the input points into seen, unseen, and forcast,\n according to the estimated camera pose and depth image.\n\n Args:\n input_points (tensor): input points.\n keyframe_dict (list): list of keyframe info dictionary.\n estimate_c2w_list (tensor): estimated camera pose.\n idx (int): current frame index.\n device (str): device name to compute on.\n\n Returns:\n seen_mask (tensor): the mask for seen area.\n forecast_mask (tensor): the mask for forecast area.\n unseen_mask (tensor): the mask for unseen area.\n \"\"\"\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n if not isinstance(input_points, torch.Tensor):\n input_points = torch.from_numpy(input_points)\n input_points = input_points.clone().detach()\n 
seen_mask_list = []\n forecast_mask_list = []\n unseen_mask_list = []\n for i, pnts in enumerate(\n torch.split(input_points, self.points_batch_size, dim=0)):\n points = pnts.to(device).float()\n # should divide the points into three parts, seen and forecast and unseen\n # seen: union of all the points in the viewing frustum of keyframes\n # forecast: union of all the points in the extended edge of the viewing frustum of keyframes\n # unseen: all the other points\n\n seen_mask = torch.zeros((points.shape[0])).bool().to(device)\n forecast_mask = torch.zeros((points.shape[0])).bool().to(device)\n if get_mask_use_all_frames:\n for i in range(0, idx + 1, 1):\n c2w = estimate_c2w_list[i].cpu().numpy()\n w2c = np.linalg.inv(c2w)\n w2c = torch.from_numpy(w2c).to(device).float()\n ones = torch.ones_like(\n points[:, 0]).reshape(-1, 1).to(device)\n homo_points = torch.cat([points, ones], dim=1).reshape(\n -1, 4, 1).to(device).float() # (N, 4)\n # (N, 4, 1)=(4,4)*(N, 4, 1)\n cam_cord_homo = w2c @ homo_points\n cam_cord = cam_cord_homo[:, :3] # (N, 3, 1)\n\n K = torch.from_numpy(\n np.array([[fx, .0, cx], [.0, fy, cy],\n [.0, .0, 1.0]]).reshape(3, 3)).to(device)\n cam_cord[:, 0] *= -1\n uv = K.float() @ cam_cord.float()\n z = uv[:, -1:] + 1e-8\n uv = uv[:, :2] / z\n uv = uv.float()\n edge = 0\n cur_mask_seen = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_seen = cur_mask_seen & (z[:, :, 0] < 0)\n\n edge = -1000\n cur_mask_forecast = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_forecast = cur_mask_forecast & (z[:, :, 0] < 0)\n\n # forecast\n cur_mask_forecast = cur_mask_forecast.reshape(-1)\n # seen\n cur_mask_seen = cur_mask_seen.reshape(-1)\n\n seen_mask |= cur_mask_seen\n forecast_mask |= cur_mask_forecast\n else:\n for keyframe in keyframe_dict:\n c2w = keyframe['est_c2w'].cpu().numpy()\n w2c = np.linalg.inv(c2w)\n w2c = torch.from_numpy(w2c).to(device).float()\n ones = torch.ones_like(\n points[:, 0]).reshape(-1, 1).to(device)\n homo_points = torch.cat([points, ones], dim=1).reshape(\n -1, 4, 1).to(device).float()\n cam_cord_homo = w2c @ homo_points\n cam_cord = cam_cord_homo[:, :3]\n\n K = torch.from_numpy(\n np.array([[fx, .0, cx], [.0, fy, cy],\n [.0, .0, 1.0]]).reshape(3, 3)).to(device)\n cam_cord[:, 0] *= -1\n uv = K.float() @ cam_cord.float()\n z = uv[:, -1:] + 1e-8\n uv = uv[:, :2] / z\n uv = uv.float()\n edge = 0\n cur_mask_seen = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_seen = cur_mask_seen & (z[:, :, 0] < 0)\n\n edge = -1000\n cur_mask_forecast = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_forecast = cur_mask_forecast & (z[:, :, 0] < 0)\n\n if self.depth_test:\n gt_depth = keyframe['depth'].to(\n device).reshape(1, 1, H, W)\n vgrid = uv.reshape(1, 1, -1, 2)\n # normalized to [-1, 1]\n vgrid[..., 0] = (vgrid[..., 0] / (W-1) * 2.0 - 1.0)\n vgrid[..., 1] = (vgrid[..., 1] / (H-1) * 2.0 - 1.0)\n depth_sample = F.grid_sample(\n gt_depth, vgrid, padding_mode='zeros', align_corners=True)\n depth_sample = depth_sample.reshape(-1)\n max_depth = torch.max(depth_sample)\n # forecast\n cur_mask_forecast = cur_mask_forecast.reshape(-1)\n proj_depth_forecast = -cam_cord[cur_mask_forecast,\n 2].reshape(-1)\n cur_mask_forecast[cur_mask_forecast.clone()] &= proj_depth_forecast < max_depth\n # seen\n cur_mask_seen = cur_mask_seen.reshape(-1)\n proj_depth_seen = - 
cam_cord[cur_mask_seen, 2].reshape(-1)\n cur_mask_seen[cur_mask_seen.clone()] &= \\\n (proj_depth_seen < depth_sample[cur_mask_seen]+2.4) \\\n & (depth_sample[cur_mask_seen]-2.4 < proj_depth_seen)\n else:\n max_depth = torch.max(keyframe['depth'])*1.1\n\n # forecast\n cur_mask_forecast = cur_mask_forecast.reshape(-1)\n proj_depth_forecast = -cam_cord[cur_mask_forecast,\n 2].reshape(-1)\n cur_mask_forecast[\n cur_mask_forecast.clone()] &= proj_depth_forecast < max_depth\n\n # seen\n cur_mask_seen = cur_mask_seen.reshape(-1)\n proj_depth_seen = - \\\n cam_cord[cur_mask_seen, 2].reshape(-1)\n cur_mask_seen[cur_mask_seen.clone(\n )] &= proj_depth_seen < max_depth\n\n seen_mask |= cur_mask_seen\n forecast_mask |= cur_mask_forecast\n\n forecast_mask &= ~seen_mask\n unseen_mask = ~(seen_mask | forecast_mask)\n\n seen_mask = seen_mask.cpu().numpy()\n forecast_mask = forecast_mask.cpu().numpy()\n unseen_mask = unseen_mask.cpu().numpy()\n\n seen_mask_list.append(seen_mask)\n forecast_mask_list.append(forecast_mask)\n unseen_mask_list.append(unseen_mask)\n\n seen_mask = np.concatenate(seen_mask_list, axis=0)\n forecast_mask = np.concatenate(forecast_mask_list, axis=0)\n unseen_mask = np.concatenate(unseen_mask_list, axis=0)\n return seen_mask, forecast_mask, unseen_mask\n\n def get_bound_from_frames(self, keyframe_dict, scale=1):\n \"\"\"\n Get the scene bound (convex hull),\n using sparse estimated camera poses and corresponding depth images.\n\n Args:\n keyframe_dict (list): list of keyframe info dictionary.\n scale (float): scene scale.\n\n Returns:\n return_mesh (trimesh.Trimesh): the convex hull.\n \"\"\"\n\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n\n if version.parse(o3d.__version__) >= version.parse('0.13.0'):\n # for new version as provided in environment.yaml\n volume = o3d.pipelines.integration.ScalableTSDFVolume(\n voxel_length=4.0 * scale / 512.0,\n sdf_trunc=0.04 * scale,\n color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)\n else:\n # for lower version\n volume = o3d.integration.ScalableTSDFVolume(\n voxel_length=4.0 * scale / 512.0,\n sdf_trunc=0.04 * scale,\n color_type=o3d.integration.TSDFVolumeColorType.RGB8)\n cam_points = []\n for keyframe in keyframe_dict:\n c2w = keyframe['est_c2w'].cpu().numpy()\n # convert to open3d camera pose\n c2w[:3, 1] *= -1.0\n c2w[:3, 2] *= -1.0\n w2c = np.linalg.inv(c2w)\n cam_points.append(c2w[:3, 3])\n depth = keyframe['depth'].cpu().numpy()\n color = keyframe['color'].cpu().numpy()\n\n depth = o3d.geometry.Image(depth.astype(np.float32))\n color = o3d.geometry.Image(np.array(\n (color * 255).astype(np.uint8)))\n\n intrinsic = o3d.camera.PinholeCameraIntrinsic(W, H, fx, fy, cx, cy)\n rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(\n color,\n depth,\n depth_scale=1,\n depth_trunc=1000,\n convert_rgb_to_intensity=False)\n volume.integrate(rgbd, intrinsic, w2c)\n\n cam_points = np.stack(cam_points, axis=0)\n mesh = volume.extract_triangle_mesh()\n mesh_points = np.array(mesh.vertices)\n points = np.concatenate([cam_points, mesh_points], axis=0)\n o3d_pc = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))\n mesh, _ = o3d_pc.compute_convex_hull()\n mesh.compute_vertex_normals()\n if version.parse(o3d.__version__) >= version.parse('0.13.0'):\n mesh = mesh.scale(self.clean_mesh_bound_scale, mesh.get_center())\n else:\n mesh = mesh.scale(self.clean_mesh_bound_scale, center=True)\n points = np.array(mesh.vertices)\n faces = np.array(mesh.triangles)\n return_mesh = 
trimesh.Trimesh(vertices=points, faces=faces)\n return return_mesh\n\n def eval_points(self, p, decoders, tsdf_volume, tsdf_bnds, c=None, stage='color', device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): point coordinates.\n decoders (nn.module decoders): decoders.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n c (dicts, optional): feature grids. Defaults to None.\n stage (str, optional): query stage, corresponds to different levels. Defaults to 'color'.\n device (str, optional): device name to compute on. Defaults to 'cuda:0'.\n\n Returns:\n ret (tensor): occupancy (and color) value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n bound = self.bound\n rets = []\n\n for pi in p_split:\n # mask for points out of bound\n mask_x = (pi[:, 0] < bound[0][1]) & (pi[:, 0] > bound[0][0])\n mask_y = (pi[:, 1] < bound[1][1]) & (pi[:, 1] > bound[1][0])\n mask_z = (pi[:, 2] < bound[2][1]) & (pi[:, 2] > bound[2][0])\n mask = mask_x & mask_y & mask_z\n\n pi = pi.unsqueeze(0)\n ret, _ = decoders(pi, c_grid=c, tsdf_volume=tsdf_volume, tsdf_bnds=tsdf_bnds, stage=stage)\n \n ret = ret.squeeze(0)\n if len(ret.shape) == 1 and ret.shape[0] == 4:\n ret = ret.unsqueeze(0)\n\n ret[~mask, 3] = 100\n rets.append(ret)\n\n ret = torch.cat(rets, dim=0)\n\n return ret\n\n def sample_grid_tsdf(self, p, tsdf_volume, device='cuda:0'):\n\n p_nor = normalize_3d_coordinate(p.clone(), self.tsdf_bnds)\n p_nor = p_nor.unsqueeze(0)\n vgrid = p_nor[:, :, None, None].float()\n # acutally trilinear interpolation if mode = 'bilinear'\n tsdf_value = F.grid_sample(tsdf_volume.to(device), vgrid.to(device), padding_mode='border', align_corners=True,\n mode='bilinear').squeeze(-1).squeeze(-1)\n return tsdf_value\n\n\n def eval_points_tsdf(self, p, tsdf_volume, device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): Point coordinates.\n tsdf_volume (tensor): tsdf volume.\n\n Returns:\n ret (tensor): tsdf value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n tsdf_vals = []\n for pi in p_split:\n pi = pi.unsqueeze(0)\n tsdf_volume_tensor = tsdf_volume\n\n tsdf_val = self.sample_grid_tsdf(pi, tsdf_volume_tensor, device)\n tsdf_val = tsdf_val.squeeze(0)\n tsdf_vals.append(tsdf_val)\n\n tsdf_values = torch.cat(tsdf_vals, dim=1)\n return tsdf_values\n\n\n def get_grid_uniform(self, resolution):\n \"\"\"\n Get query point coordinates for marching cubes.\n\n Args:\n resolution (int): marching cubes resolution.\n\n Returns:\n (dict): points coordinates and sampled coordinates for each axis.\n \"\"\"\n bound = self.marching_cubes_bound\n\n padding = 0.05\n x = np.linspace(bound[0][0] - padding, bound[0][1] + padding,\n resolution)\n y = np.linspace(bound[1][0] - padding, bound[1][1] + padding,\n resolution)\n z = np.linspace(bound[2][0] - padding, bound[2][1] + padding,\n resolution)\n\n xx, yy, zz = np.meshgrid(x, y, z)\n grid_points = np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T\n grid_points = torch.tensor(np.vstack(\n [xx.ravel(), yy.ravel(), zz.ravel()]).T,\n dtype=torch.float)\n\n\n\n return {\"grid_points\": grid_points, \"xyz\": [x, y, z]}\n\n def get_mesh(self,\n mesh_out_file,\n c,\n decoders,\n keyframe_dict,\n estimate_c2w_list,\n idx,\n tsdf_volume,\n device='cuda:0',\n color=True,\n clean_mesh=True,\n get_mask_use_all_frames=False):\n \"\"\"\n Extract mesh from scene representation and save mesh to file.\n\n 
Args:\n mesh_out_file (str): output mesh filename.\n c (dicts): feature grids.\n decoders (nn.module): decoders.\n keyframe_dict (list): list of keyframe info.\n estimate_c2w_list (tensor): estimated camera pose.\n idx (int): current processed camera ID.\n tsdf volume (tensor): tsdf volume.\n device (str, optional): device name to compute on. Defaults to 'cuda:0'.\n color (bool, optional): whether to extract colored mesh. Defaults to True.\n clean_mesh (bool, optional): whether to clean the output mesh \n (remove outliers outside the convexhull and small geometry noise). \n Defaults to True.\n get_mask_use_all_frames (bool, optional): \n whether to use all frames or just keyframes when getting the seen/unseen mask. Defaults to False.\n \"\"\"\n with torch.no_grad():\n\n grid = self.get_grid_uniform(self.resolution) \n points = grid['grid_points']\n points = points.to(device)\n eval_tsdf_volume = tsdf_volume\n\n mesh_bound = self.get_bound_from_frames(\n keyframe_dict, self.scale)\n z = []\n mask = []\n for i, pnts in enumerate(torch.split(points, self.points_batch_size, dim=0)):\n mask.append(mesh_bound.contains(pnts.cpu().numpy()))\n mask = np.concatenate(mask, axis=0)\n for i, pnts in enumerate(torch.split(points, self.points_batch_size, dim=0)):\n eval_tsdf = self.eval_points_tsdf(pnts, eval_tsdf_volume, device)\n eval_tsdf_mask = ((eval_tsdf > -1.0+1e-4) & (eval_tsdf < 1.0-1e-4)).cpu().numpy()\n ret = self.eval_points(pnts, decoders, tsdf_volume, self.tsdf_bnds, c, 'high', device)\n ret = ret.cpu().numpy()[:, -1]\n\n eval_tsdf_mask = eval_tsdf_mask.reshape(ret.shape)\n z.append(ret)\n \n z = np.concatenate(z, axis=0)\n z[~mask] = 100\n z = z.astype(np.float32)\n\n z_uni_m = z.reshape(\n grid['xyz'][1].shape[0], grid['xyz'][0].shape[0],\n grid['xyz'][2].shape[0]).transpose([1, 0, 2])\n\n print('begin marching cube...')\n combine_occ_tsdf = z_uni_m\n\n try:\n if version.parse(\n skimage.__version__) > version.parse('0.15.0'):\n # for new version as provided in environment.yaml\n verts, faces, normals, values = skimage.measure.marching_cubes(\n volume=combine_occ_tsdf,\n level=self.level_set, \n spacing=(grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][1][2] - grid['xyz'][1][1],\n grid['xyz'][2][2] - grid['xyz'][2][1]))\n else:\n # for lower version\n verts, faces, normals, values = skimage.measure.marching_cubes_lewiner(\n volume=combine_occ_tsdf,\n level=self.level_set, \n spacing=(grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][1][2] - grid['xyz'][1][1],\n grid['xyz'][2][2] - grid['xyz'][2][1]))\n except:\n print(\n 'marching_cubes error. 
Possibly no surface extracted from the level set.'\n )\n return\n\n # convert back to world coordinates\n vertices = verts + np.array(\n [grid['xyz'][0][0], grid['xyz'][1][0], grid['xyz'][2][0]])\n\n if clean_mesh:\n points = vertices\n mesh = trimesh.Trimesh(vertices=vertices,\n faces=faces,\n process=False)\n seen_mask, _, unseen_mask = self.point_masks(\n points, keyframe_dict, estimate_c2w_list, idx, device=device, \n get_mask_use_all_frames=get_mask_use_all_frames)\n unseen_mask = ~seen_mask\n face_mask = unseen_mask[mesh.faces].all(axis=1)\n mesh.update_faces(~face_mask)\n\n # get connected components\n components = mesh.split(only_watertight=False)\n if self.get_largest_components:\n areas = np.array([c.area for c in components], dtype=np.float)\n mesh = components[areas.argmax()]\n else:\n new_components = []\n for comp in components:\n if comp.area > self.remove_small_geometry_threshold * self.scale * self.scale:\n new_components.append(comp)\n mesh = trimesh.util.concatenate(new_components)\n vertices = mesh.vertices\n faces = mesh.faces\n\n if color:\n if self.color_mesh_extraction_method == 'direct_point_query':\n # color is extracted by passing the coordinates of mesh vertices through the network\n points = torch.from_numpy(vertices)\n z = []\n for i, pnts in enumerate(\n torch.split(points, self.points_batch_size, dim=0)):\n ret = self.eval_points(\n pnts.to(device).float(), decoders, tsdf_volume, self.tsdf_bnds, c, 'color',\n device)\n z_color = ret.cpu()[..., :3]\n z.append(z_color)\n z = torch.cat(z, axis=0)\n vertex_colors = z.numpy()\n\n vertex_colors = np.clip(vertex_colors, 0, 1) * 255\n vertex_colors = vertex_colors.astype(np.uint8)\n\n\n else:\n vertex_colors = None\n\n vertices /= self.scale\n mesh = trimesh.Trimesh(vertices, faces, vertex_colors=vertex_colors)\n mesh.export(mesh_out_file)\n if self.verbose:\n print('Saved mesh at', mesh_out_file)\n\n return z_uni_m" }, { "identifier": "Renderer", "path": "src/utils/Renderer.py", "snippet": "class Renderer(object):\n def __init__(self, cfg, args, slam, points_batch_size=500000, ray_batch_size=100000):\n self.ray_batch_size = ray_batch_size\n self.points_batch_size = points_batch_size\n\n self.lindisp = cfg['rendering']['lindisp']\n self.perturb = cfg['rendering']['perturb']\n self.N_samples = cfg['rendering']['N_samples']\n self.N_surface = cfg['rendering']['N_surface']\n self.N_importance = cfg['rendering']['N_importance']\n\n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n self.bound = slam.bound\n self.sample_mode = 'bilinear'\n self.tsdf_bnds = slam.vol_bnds\n\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n self.resolution = cfg['meshing']['resolution']\n\n def eval_points(self, p, decoders, tsdf_volume, tsdf_bnds, c=None, stage='color', device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): Point coordinates.\n decoders (nn.module decoders): Decoders.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n c (dicts, optional): Feature grids. Defaults to None.\n stage (str, optional): Query stage, corresponds to different levels. Defaults to 'color'.\n device (str, optional): CUDA device. 
Defaults to 'cuda:0'.\n\n Returns:\n ret (tensor): occupancy (and color) value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n bound = self.bound\n rets = []\n weights = []\n\n for pi in p_split:\n # mask for points out of bound\n mask_x = (pi[:, 0] < bound[0][1]) & (pi[:, 0] > bound[0][0])\n mask_y = (pi[:, 1] < bound[1][1]) & (pi[:, 1] > bound[1][0])\n mask_z = (pi[:, 2] < bound[2][1]) & (pi[:, 2] > bound[2][0])\n mask = mask_x & mask_y & mask_z\n\n pi = pi.unsqueeze(0)\n ret, w = decoders(pi, c_grid=c, tsdf_volume=tsdf_volume, tsdf_bnds=tsdf_bnds, stage=stage)\n ret = ret.squeeze(0)\n\n\n if len(ret.shape) == 1 and ret.shape[0] == 4:\n ret = ret.unsqueeze(0)\n\n ret[~mask, 3] = 100 \n rets.append(ret)\n weights.append(w)\n\n ret = torch.cat(rets, dim=0)\n weight = torch.cat(weights, dim=0)\n\n return ret, weight \n\n def sample_grid_tsdf(self, p, tsdf_volume, device='cuda:0'):\n\n p_nor = normalize_3d_coordinate(p.clone(), self.tsdf_bnds)\n p_nor = p_nor.unsqueeze(0)\n vgrid = p_nor[:, :, None, None].float()\n # acutally trilinear interpolation if mode = 'bilinear'\n tsdf_value = F.grid_sample(tsdf_volume.to(device), vgrid.to(device), padding_mode='border', align_corners=True,\n mode='bilinear').squeeze(-1).squeeze(-1)\n return tsdf_value\n\n\n def eval_points_tsdf(self, p, tsdf_volume, device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): Point coordinates.\n \n\n Returns:\n ret (tensor): tsdf value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n tsdf_vals = []\n for pi in p_split:\n pi = pi.unsqueeze(0)\n tsdf_volume_tensor = tsdf_volume\n\n tsdf_val = self.sample_grid_tsdf(pi, tsdf_volume_tensor, device)\n tsdf_val = tsdf_val.squeeze(0)\n tsdf_vals.append(tsdf_val)\n\n tsdf_values = torch.cat(tsdf_vals, dim=1)\n return tsdf_values\n\n\n def render_batch_ray(self, c, decoders, rays_d, rays_o, device, tsdf_volume, tsdf_bnds, stage, gt_depth=None):\n \"\"\"\n Render color, depth and uncertainty of a batch of rays.\n\n Args:\n c (dict): feature grids.\n decoders (nn.module): decoders.\n rays_d (tensor, N*3): rays direction.\n rays_o (tensor, N*3): rays origin.\n device (str): device name to compute on.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n stage (str): query stage.\n gt_depth (tensor, optional): sensor depth image. 
Defaults to None.\n\n Returns:\n depth (tensor): rendered depth.\n uncertainty (tensor): rendered uncertainty.\n color (tensor): rendered color.\n weight (tensor): attention weight.\n \"\"\"\n eval_tsdf_volume = tsdf_volume\n \n\n N_samples = self.N_samples\n N_surface = self.N_surface\n N_importance = self.N_importance\n\n N_rays = rays_o.shape[0]\n\n if gt_depth is None:\n N_surface = 0\n near = 0.01\n else:\n gt_depth = gt_depth.reshape(-1, 1)\n gt_depth_samples = gt_depth.repeat(1, N_samples)\n near = gt_depth_samples*0.01\n\n with torch.no_grad():\n det_rays_o = rays_o.clone().detach().unsqueeze(-1) # (N, 3, 1)\n det_rays_d = rays_d.clone().detach().unsqueeze(-1) # (N, 3, 1)\n t = (self.bound.unsqueeze(0).to(device) -\n det_rays_o)/det_rays_d # (N, 3, 2)\n far_bb, _ = torch.min(torch.max(t, dim=2)[0], dim=1)\n far_bb = far_bb.unsqueeze(-1)\n far_bb += 0.01\n\n if gt_depth is not None:\n # in case the bound is too large\n far = torch.clamp(far_bb, 0, torch.max(gt_depth*1.2))\n\n else:\n far = far_bb\n if N_surface > 0:\n if False:\n # this naive implementation downgrades performance\n gt_depth_surface = gt_depth.repeat(1, N_surface)\n t_vals_surface = torch.linspace(\n 0., 1., steps=N_surface).to(device)\n z_vals_surface = 0.95*gt_depth_surface * \\\n (1.-t_vals_surface) + 1.05 * \\\n gt_depth_surface * (t_vals_surface)\n else:\n # since we want to colorize even on regions with no depth sensor readings,\n # meaning colorize on interpolated geometry region,\n # we sample all pixels (not using depth mask) for color loss.\n # Therefore, for pixels with non-zero depth value, we sample near the surface,\n # since it is not a good idea to sample 16 points near (half even behind) camera,\n # for pixels with zero depth value, we sample uniformly from camera to max_depth.\n gt_none_zero_mask = gt_depth > 0\n gt_none_zero = gt_depth[gt_none_zero_mask]\n gt_none_zero = gt_none_zero.unsqueeze(-1)\n gt_depth_surface = gt_none_zero.repeat(1, N_surface)\n t_vals_surface = torch.linspace(\n 0., 1., steps=N_surface).double().to(device)\n # emperical range 0.05*depth\n z_vals_surface_depth_none_zero = 0.95*gt_depth_surface * \\\n (1.-t_vals_surface) + 1.05 * \\\n gt_depth_surface * (t_vals_surface)\n z_vals_surface = torch.zeros(\n gt_depth.shape[0], N_surface).to(device).double()\n gt_none_zero_mask = gt_none_zero_mask.squeeze(-1)\n z_vals_surface[gt_none_zero_mask,\n :] = z_vals_surface_depth_none_zero\n near_surface = 0.001\n far_surface = torch.max(gt_depth)\n z_vals_surface_depth_zero = near_surface * \\\n (1.-t_vals_surface) + far_surface * (t_vals_surface)\n z_vals_surface_depth_zero.unsqueeze(\n 0).repeat((~gt_none_zero_mask).sum(), 1)\n z_vals_surface[~gt_none_zero_mask,\n :] = z_vals_surface_depth_zero\n\n t_vals = torch.linspace(0., 1., steps=N_samples, device=device)\n\n if not self.lindisp:\n z_vals = near * (1.-t_vals) + far * (t_vals)\n else:\n z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))\n\n if self.perturb > 0.:\n # get intervals between samples\n mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])\n upper = torch.cat([mids, z_vals[..., -1:]], -1)\n lower = torch.cat([z_vals[..., :1], mids], -1)\n # stratified samples in those intervals\n t_rand = torch.rand(z_vals.shape).to(device)\n z_vals = lower + (upper - lower) * t_rand\n\n if N_surface > 0:\n z_vals, _ = torch.sort(\n torch.cat([z_vals, z_vals_surface.double()], -1), -1)\n\n pts = rays_o[..., None, :] + rays_d[..., None, :] * \\\n z_vals[..., :, None] # [N_rays, N_samples+N_surface, 3]\n pointsf = pts.reshape(-1, 3)\n \n 
raw, weight = self.eval_points(pointsf, decoders, tsdf_volume, tsdf_bnds, c, stage, device)\n raw = raw.reshape(N_rays, N_samples+N_surface, -1)\n weight = weight.reshape(N_rays, N_samples+N_surface, -1)\n\n\n depth, uncertainty, color, weights = raw2outputs_nerf_color(\n raw, z_vals, rays_d, occupancy=self.occupancy, device=device)\n \n if N_importance > 0:\n z_vals_mid = .5 * (z_vals[..., 1:] + z_vals[..., :-1])\n z_samples = sample_pdf(\n z_vals_mid, weights[..., 1:-1], N_importance, det=(self.perturb == 0.), device=device)\n z_samples = z_samples.detach()\n z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)\n\n pts = rays_o[..., None, :] + \\\n rays_d[..., None, :] * z_vals[..., :, None]\n pts = pts.reshape(-1, 3)\n \n raw, weight = self.eval_points(pointsf, decoders, tsdf_volume, tsdf_bnds, c, stage, device)\n raw = raw.reshape(N_rays, N_samples+N_surface, -1)\n weight = weight.reshape(N_rays, N_samples+N_surface, -1)\n\n depth, uncertainty, color, weights = raw2outputs_nerf_color(\n raw, z_vals, rays_d, occupancy=self.occupancy, device=device)\n return depth, uncertainty, color, weight\n\n\n return depth, uncertainty, color, weight\n\n\n def render_img(self, c, decoders, c2w, device, tsdf_volume, tsdf_bnds, stage, gt_depth=None):\n \"\"\"\n Renders out depth, uncertainty, and color images.\n\n Args:\n c (dict): feature grids.\n decoders (nn.module): decoders.\n c2w (tensor): camera to world matrix of current frame.\n device (str): device name to compute on.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n stage (str): query stage.\n gt_depth (tensor, optional): sensor depth image. Defaults to None.\n\n Returns:\n depth (tensor, H*W): rendered depth image.\n uncertainty (tensor, H*W): rendered uncertainty image.\n color (tensor, H*W*3): rendered color image.\n \"\"\"\n \n with torch.no_grad():\n H = self.H\n W = self.W\n rays_o, rays_d = get_rays(\n H, W, self.fx, self.fy, self.cx, self.cy, c2w, device)\n rays_o = rays_o.reshape(-1, 3)\n rays_d = rays_d.reshape(-1, 3)\n\n depth_list = []\n uncertainty_list = []\n color_list = []\n\n\n ray_batch_size = self.ray_batch_size\n gt_depth = gt_depth.reshape(-1)\n\n for i in range(0, rays_d.shape[0], ray_batch_size):\n rays_d_batch = rays_d[i:i+ray_batch_size]\n rays_o_batch = rays_o[i:i+ray_batch_size]\n\n iter = 10\n\n if gt_depth is None:\n ret = self.render_batch_ray(\n c, decoders, rays_d_batch, rays_o_batch, device, tsdf_volume, tsdf_bnds, stage, gt_depth=None)\n else:\n gt_depth_batch = gt_depth[i:i+ray_batch_size]\n ret = self.render_batch_ray(\n c, decoders, rays_d_batch, rays_o_batch, device, tsdf_volume, tsdf_bnds, stage, gt_depth=gt_depth_batch)\n\n depth, uncertainty, color, _= ret\n\n \n depth_list.append(depth.double())\n uncertainty_list.append(uncertainty.double())\n color_list.append(color)\n \n \n\n\n\n depth = torch.cat(depth_list, dim=0)\n uncertainty = torch.cat(uncertainty_list, dim=0)\n color = torch.cat(color_list, dim=0)\n \n depth = depth.reshape(H, W)\n uncertainty = uncertainty.reshape(H, W)\n color = color.reshape(H, W, 3)\n\n return depth, uncertainty, color " } ]
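The `sample_grid_tsdf` helpers in the Mesher and Renderer snippets above both read the TSDF prior through `torch.nn.functional.grid_sample`, which performs trilinear interpolation on a 5-D volume when `mode='bilinear'`. A minimal sketch of that lookup follows; the explicit min/max normalization stands in for the repository's `normalize_3d_coordinate`, whose exact axis convention and bounds layout are assumed here rather than taken from the source.

import torch
import torch.nn.functional as F

def sample_tsdf_trilinear(points, tsdf_volume, vol_bnds):
    # points:      (N, 3) query points; axis order assumed to match the volume layout
    # tsdf_volume: (1, 1, D, H, W) TSDF grid
    # vol_bnds:    (3, 2) per-axis [min, max] bounds of the volume
    lo, hi = vol_bnds[:, 0], vol_bnds[:, 1]
    p_nor = 2.0 * (points - lo) / (hi - lo) - 1.0        # normalize to [-1, 1]
    vgrid = p_nor.view(1, -1, 1, 1, 3).float()           # (1, N, 1, 1, 3) sampling grid
    # 'bilinear' on a 5-D input is effectively trilinear interpolation
    vals = F.grid_sample(tsdf_volume, vgrid, mode='bilinear',
                         padding_mode='border', align_corners=True)
    return vals.view(-1)                                 # (N,) interpolated TSDF values

# toy usage
vol = torch.randn(1, 1, 32, 32, 32)
bnds = torch.tensor([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
pts = torch.rand(5, 3)
print(sample_tsdf_trilinear(pts, vol, bnds).shape)       # torch.Size([5])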
import os import time import numpy as np import torch import torch.multiprocessing import torch.multiprocessing as mp from src import config from src.Mapper import Mapper from src.Tracker import Tracker from src.utils.datasets import get_dataset from src.utils.Logger import Logger from src.utils.Mesher import Mesher from src.utils.Renderer import Renderer
20,617
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass self.frame_reader = get_dataset(cfg, args, self.scale) self.n_img = len(self.frame_reader) self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4)) self.estimate_c2w_list.share_memory_() dataset = self.cfg['data']['dataset'] scene_id = self.cfg['data']['id'] self.scene_id = scene_id print(scene_id) # load tsdf grid if dataset == 'scannet': self.tsdf_volume_shared = torch.load(f'scannet_tsdf_volume/scene{scene_id}_tsdf_volume.pt') elif dataset == 'replica': self.tsdf_volume_shared = torch.load(f'replica_tsdf_volume/{scene_id}_tsdf_volume.pt') self.tsdf_volume_shared = self.tsdf_volume_shared.to(self.cfg['mapping']['device']) self.tsdf_volume_shared.share_memory_() # load tsdf grid bound if dataset == 'scannet': self.tsdf_bnds = torch.load(f'scannet_tsdf_volume/scene{scene_id}_bounds.pt') elif dataset == 'replica': self.tsdf_bnds = torch.load(f'replica_tsdf_volume/{scene_id}_bounds.pt') self.tsdf_bnds = torch.tensor(self.tsdf_bnds).to(self.cfg['mapping']['device']) self.tsdf_bnds.share_memory_() self.vol_bnds = self.tsdf_bnds self.vol_bnds.share_memory_() self.gt_c2w_list = torch.zeros((self.n_img, 4, 4)) self.gt_c2w_list.share_memory_() self.idx = torch.zeros((1)).int() self.idx.share_memory_() self.mapping_first_frame = torch.zeros((1)).int() self.mapping_first_frame.share_memory_() # the id of the newest frame Mapper is processing self.mapping_idx = torch.zeros((1)).int() self.mapping_idx.share_memory_() self.mapping_cnt = torch.zeros((1)).int() # counter for mapping self.mapping_cnt.share_memory_() for key, val in self.shared_c.items(): val = val.to(self.cfg['mapping']['device']) val.share_memory_() self.shared_c[key] = val self.shared_decoders = self.shared_decoders.to( self.cfg['mapping']['device']) self.shared_decoders.share_memory() self.renderer = Renderer(cfg, args, self)
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass self.frame_reader = get_dataset(cfg, args, self.scale) self.n_img = len(self.frame_reader) self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4)) self.estimate_c2w_list.share_memory_() dataset = self.cfg['data']['dataset'] scene_id = self.cfg['data']['id'] self.scene_id = scene_id print(scene_id) # load tsdf grid if dataset == 'scannet': self.tsdf_volume_shared = torch.load(f'scannet_tsdf_volume/scene{scene_id}_tsdf_volume.pt') elif dataset == 'replica': self.tsdf_volume_shared = torch.load(f'replica_tsdf_volume/{scene_id}_tsdf_volume.pt') self.tsdf_volume_shared = self.tsdf_volume_shared.to(self.cfg['mapping']['device']) self.tsdf_volume_shared.share_memory_() # load tsdf grid bound if dataset == 'scannet': self.tsdf_bnds = torch.load(f'scannet_tsdf_volume/scene{scene_id}_bounds.pt') elif dataset == 'replica': self.tsdf_bnds = torch.load(f'replica_tsdf_volume/{scene_id}_bounds.pt') self.tsdf_bnds = torch.tensor(self.tsdf_bnds).to(self.cfg['mapping']['device']) self.tsdf_bnds.share_memory_() self.vol_bnds = self.tsdf_bnds self.vol_bnds.share_memory_() self.gt_c2w_list = torch.zeros((self.n_img, 4, 4)) self.gt_c2w_list.share_memory_() self.idx = torch.zeros((1)).int() self.idx.share_memory_() self.mapping_first_frame = torch.zeros((1)).int() self.mapping_first_frame.share_memory_() # the id of the newest frame Mapper is processing self.mapping_idx = torch.zeros((1)).int() self.mapping_idx.share_memory_() self.mapping_cnt = torch.zeros((1)).int() # counter for mapping self.mapping_cnt.share_memory_() for key, val in self.shared_c.items(): val = val.to(self.cfg['mapping']['device']) val.share_memory_() self.shared_c[key] = val self.shared_decoders = self.shared_decoders.to( self.cfg['mapping']['device']) self.shared_decoders.share_memory() self.renderer = Renderer(cfg, args, self)
self.mesher = Mesher(cfg, args, self)
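The `DF_Prior.__init__` code above is built around PyTorch's shared-memory machinery: every tensor the mapping and tracking processes must see (`estimate_c2w_list`, `idx`, the feature grids, the TSDF volume) is moved into shared memory with `share_memory_()` before the workers are spawned. A stripped-down sketch of that pattern, with hypothetical `worker`/`main` names that are not part of the repository:

import torch
import torch.multiprocessing as mp

def worker(shared_idx, shared_poses):
    # Writes made here are visible to the parent because the storage is shared.
    shared_idx[0] = 42
    shared_poses[0] = torch.eye(4)

def main():
    mp.set_start_method('spawn', force=True)   # spawn is needed once CUDA tensors are involved
    idx = torch.zeros(1).int()
    poses = torch.zeros(10, 4, 4)
    idx.share_memory_()                        # move storages into shared memory
    poses.share_memory_()

    p = mp.Process(target=worker, args=(idx, poses))
    p.start()
    p.join()
    print(int(idx[0]))          # 42
    print(poses[0].diagonal())  # tensor([1., 1., 1., 1.])

if __name__ == '__main__':
    main()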
5
2023-10-13 00:49:57+00:00
24k
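The `Mesher.get_mesh` snippet in the record above extracts the surface with scikit-image's marching cubes and exports it through trimesh. A minimal sketch of that step on a toy occupancy grid; the sphere, resolution, and spacing are placeholders, not the repository's configuration.

import numpy as np
import skimage.measure
import trimesh

res = 64
x = np.linspace(-1.0, 1.0, res)
xx, yy, zz = np.meshgrid(x, x, x, indexing='ij')
occ = 0.5 - np.sqrt(xx**2 + yy**2 + zz**2)           # signed "occupancy" of a unit-ish sphere

verts, faces, normals, values = skimage.measure.marching_cubes(
    volume=occ,
    level=0.0,
    spacing=(x[1] - x[0],) * 3)

verts = verts + np.array([x[0], x[0], x[0]])         # shift vertices back to world coordinates
mesh = trimesh.Trimesh(vertices=verts, faces=faces, process=False)
mesh.export('sphere.ply')
print(mesh.vertices.shape, mesh.faces.shape)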
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/metrics/_ranking.py
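The snippets that follow document scikit-learn's label-binarization, input-validation, and target-type helpers. A short usage sketch, restricted to the public calls whose docstrings appear in those snippets:

import numpy as np
from sklearn.preprocessing import label_binarize
from sklearn.utils.multiclass import type_of_target

y = np.array([1, 6, 4, 1])
print(type_of_target(y))                      # 'multiclass'

# One-vs-all encoding against a fixed, known class set; column order
# follows the `classes` argument, not the order of appearance in y.
Y = label_binarize(y, classes=[1, 2, 4, 6])
print(Y)
# [[1 0 0 0]
#  [0 0 0 1]
#  [0 0 1 0]
#  [1 0 0 0]]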
[ { "identifier": "UndefinedMetricWarning", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/exceptions.py", "snippet": "class UndefinedMetricWarning(UserWarning):\n \"\"\"Warning used when the metric is invalid\n\n .. versionchanged:: 0.18\n Moved from sklearn.base.\n \"\"\"" }, { "identifier": "label_binarize", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/preprocessing/_label.py", "snippet": "@validate_params(\n {\n \"y\": [\"array-like\"],\n \"classes\": [\"array-like\"],\n \"neg_label\": [Interval(Integral, None, None, closed=\"neither\")],\n \"pos_label\": [Interval(Integral, None, None, closed=\"neither\")],\n \"sparse_output\": [\"boolean\"],\n },\n prefer_skip_nested_validation=True,\n)\ndef label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False):\n \"\"\"Binarize labels in a one-vs-all fashion.\n\n Several regression and binary classification algorithms are\n available in scikit-learn. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n This function makes it possible to compute this transformation for a\n fixed set of class labels known ahead of time.\n\n Parameters\n ----------\n y : array-like\n Sequence of integer labels or multilabel data to encode.\n\n classes : array-like of shape (n_classes,)\n Uniquely holds the label for each class.\n\n neg_label : int, default=0\n Value with which negative labels must be encoded.\n\n pos_label : int, default=1\n Value with which positive labels must be encoded.\n\n sparse_output : bool, default=False,\n Set to true if output binary array is desired in CSR sparse format.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. 
Sparse matrix will\n be of CSR format.\n\n See Also\n --------\n LabelBinarizer : Class used to wrap the functionality of label_binarize and\n allow for fitting to classes independently of the transform operation.\n\n Examples\n --------\n >>> from sklearn.preprocessing import label_binarize\n >>> label_binarize([1, 6], classes=[1, 2, 4, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n The class ordering is preserved:\n\n >>> label_binarize([1, 6], classes=[1, 6, 4, 2])\n array([[1, 0, 0, 0],\n [0, 1, 0, 0]])\n\n Binary targets transform to a column vector\n\n >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n \"\"\"\n if not isinstance(y, list):\n # XXX Workaround that will be removed when list of list format is\n # dropped\n y = check_array(\n y, input_name=\"y\", accept_sparse=\"csr\", ensure_2d=False, dtype=None\n )\n else:\n if _num_samples(y) == 0:\n raise ValueError(\"y has 0 samples: %r\" % y)\n if neg_label >= pos_label:\n raise ValueError(\n \"neg_label={0} must be strictly less than pos_label={1}.\".format(\n neg_label, pos_label\n )\n )\n\n if sparse_output and (pos_label == 0 or neg_label != 0):\n raise ValueError(\n \"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n \"pos_label={0} and neg_label={1}\"\n \"\".format(pos_label, neg_label)\n )\n\n # To account for pos_label == 0 in the dense case\n pos_switch = pos_label == 0\n if pos_switch:\n pos_label = -neg_label\n\n y_type = type_of_target(y)\n if \"multioutput\" in y_type:\n raise ValueError(\n \"Multioutput target data is not supported with label binarization\"\n )\n if y_type == \"unknown\":\n raise ValueError(\"The type of target data is not known\")\n\n n_samples = y.shape[0] if sp.issparse(y) else len(y)\n n_classes = len(classes)\n classes = np.asarray(classes)\n\n if y_type == \"binary\":\n if n_classes == 1:\n if sparse_output:\n return sp.csr_matrix((n_samples, 1), dtype=int)\n else:\n Y = np.zeros((len(y), 1), dtype=int)\n Y += neg_label\n return Y\n elif len(classes) >= 3:\n y_type = \"multiclass\"\n\n sorted_class = np.sort(classes)\n if y_type == \"multilabel-indicator\":\n y_n_classes = y.shape[1] if hasattr(y, \"shape\") else len(y[0])\n if classes.size != y_n_classes:\n raise ValueError(\n \"classes {0} mismatch with the labels {1} found in the data\".format(\n classes, unique_labels(y)\n )\n )\n\n if y_type in (\"binary\", \"multiclass\"):\n y = column_or_1d(y)\n\n # pick out the known labels from y\n y_in_classes = np.isin(y, classes)\n y_seen = y[y_in_classes]\n indices = np.searchsorted(sorted_class, y_seen)\n indptr = np.hstack((0, np.cumsum(y_in_classes)))\n\n data = np.empty_like(indices)\n data.fill(pos_label)\n Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes))\n elif y_type == \"multilabel-indicator\":\n Y = sp.csr_matrix(y)\n if pos_label != 1:\n data = np.empty_like(Y.data)\n data.fill(pos_label)\n Y.data = data\n else:\n raise ValueError(\n \"%s target data is not supported with label binarization\" % y_type\n )\n\n if not sparse_output:\n Y = Y.toarray()\n Y = Y.astype(int, copy=False)\n\n if neg_label != 0:\n Y[Y == 0] = neg_label\n\n if pos_switch:\n Y[Y == pos_label] = 0\n else:\n Y.data = Y.data.astype(int, copy=False)\n\n # preserve label ordering\n if np.any(classes != sorted_class):\n indices = np.searchsorted(sorted_class, classes)\n Y = Y[:, indices]\n\n if y_type == \"binary\":\n if sparse_output:\n Y = Y.getcol(-1)\n else:\n Y = Y[:, -1].reshape((-1, 1))\n\n 
return Y" }, { "identifier": "assert_all_finite", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def assert_all_finite(\n X,\n *,\n allow_nan=False,\n estimator_name=None,\n input_name=\"\",\n):\n \"\"\"Throw a ValueError if X contains NaN or infinity.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix}\n The input data.\n\n allow_nan : bool, default=False\n If True, do not throw error when `X` contains NaN.\n\n estimator_name : str, default=None\n The estimator name, used to construct the error message.\n\n input_name : str, default=\"\"\n The data name used to construct the error message. In particular\n if `input_name` is \"X\" and the data has NaN values and\n allow_nan is False, the error message will link to the imputer\n documentation.\n \"\"\"\n _assert_all_finite(\n X.data if sp.issparse(X) else X,\n allow_nan=allow_nan,\n estimator_name=estimator_name,\n input_name=input_name,\n )" }, { "identifier": "check_array", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def check_array(\n array,\n accept_sparse=False,\n *,\n accept_large_sparse=True,\n dtype=\"numeric\",\n order=None,\n copy=False,\n force_all_finite=True,\n ensure_2d=True,\n allow_nd=False,\n ensure_min_samples=1,\n ensure_min_features=1,\n estimator=None,\n input_name=\"\",\n):\n \"\"\"Input validation on an array, list, sparse matrix or similar.\n\n By default, the input is checked to be a non-empty 2D array containing\n only finite values. If the dtype of the array is object, attempt\n converting to float, raising on failure.\n\n Parameters\n ----------\n array : object\n Input object to check / convert.\n\n accept_sparse : str, bool or list/tuple of str, default=False\n String[s] representing allowed sparse matrix formats, such as 'csc',\n 'csr', etc. If the input is sparse but not in the allowed format,\n it will be converted to the first listed format. True allows the input\n to be any format. False means that a sparse matrix input will\n raise an error.\n\n accept_large_sparse : bool, default=True\n If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by\n accept_sparse, accept_large_sparse=False will cause it to be accepted\n only if its indices are stored with a 32-bit dtype.\n\n .. versionadded:: 0.20\n\n dtype : 'numeric', type, list of type or None, default='numeric'\n Data type of result. If None, the dtype of the input is preserved.\n If \"numeric\", dtype is preserved unless array.dtype is object.\n If dtype is a list of types, conversion on the first type is only\n performed if the dtype of the input is not in the list.\n\n order : {'F', 'C'} or None, default=None\n Whether an array will be forced to be fortran or c-style.\n When order is None (default), then if copy=False, nothing is ensured\n about the memory layout of the output array; otherwise (copy=True)\n the memory layout of the returned array is kept as close as possible\n to the original array.\n\n copy : bool, default=False\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in array. The\n possibilities are:\n\n - True: Force all values of array to be finite.\n - False: accepts np.inf, np.nan, pd.NA in array.\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\n cannot be infinite.\n\n .. 
versionadded:: 0.20\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`\n\n ensure_2d : bool, default=True\n Whether to raise a value error if array is not 2D.\n\n allow_nd : bool, default=False\n Whether to allow array.ndim > 2.\n\n ensure_min_samples : int, default=1\n Make sure that the array has a minimum number of samples in its first\n axis (rows for a 2D array). Setting to 0 disables this check.\n\n ensure_min_features : int, default=1\n Make sure that the 2D array has some minimum number of features\n (columns). The default value of 1 rejects empty datasets.\n This check is only enforced when the input data has effectively 2\n dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0\n disables this check.\n\n estimator : str or estimator instance, default=None\n If passed, include the name of the estimator in warning messages.\n\n input_name : str, default=\"\"\n The data name used to construct the error message. In particular\n if `input_name` is \"X\" and the data has NaN values and\n allow_nan is False, the error message will link to the imputer\n documentation.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n array_converted : object\n The converted and validated array.\n \"\"\"\n if isinstance(array, np.matrix):\n raise TypeError(\n \"np.matrix is not supported. Please convert to a numpy array with \"\n \"np.asarray. For more information see: \"\n \"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html\"\n )\n\n xp, is_array_api_compliant = get_namespace(array)\n\n # store reference to original array to check if copy is needed when\n # function returns\n array_orig = array\n\n # store whether originally we wanted numeric dtype\n dtype_numeric = isinstance(dtype, str) and dtype == \"numeric\"\n\n dtype_orig = getattr(array, \"dtype\", None)\n if not is_array_api_compliant and not hasattr(dtype_orig, \"kind\"):\n # not a data type (e.g. a column named dtype in a pandas DataFrame)\n dtype_orig = None\n\n # check if the object contains several dtypes (typically a pandas\n # DataFrame), and store them. If not, store None.\n dtypes_orig = None\n pandas_requires_conversion = False\n if hasattr(array, \"dtypes\") and hasattr(array.dtypes, \"__array__\"):\n # throw warning if columns are sparse. 
If all columns are sparse, then\n # array.sparse exists and sparsity will be preserved (later).\n with suppress(ImportError):\n from pandas import SparseDtype\n\n def is_sparse(dtype):\n return isinstance(dtype, SparseDtype)\n\n if not hasattr(array, \"sparse\") and array.dtypes.apply(is_sparse).any():\n warnings.warn(\n \"pandas.DataFrame with sparse columns found.\"\n \"It will be converted to a dense numpy array.\"\n )\n\n dtypes_orig = list(array.dtypes)\n pandas_requires_conversion = any(\n _pandas_dtype_needs_early_conversion(i) for i in dtypes_orig\n )\n if all(isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig):\n dtype_orig = np.result_type(*dtypes_orig)\n elif pandas_requires_conversion and any(d == object for d in dtypes_orig):\n # Force object if any of the dtypes is an object\n dtype_orig = object\n\n elif (_is_extension_array_dtype(array) or hasattr(array, \"iloc\")) and hasattr(\n array, \"dtype\"\n ):\n # array is a pandas series\n pandas_requires_conversion = _pandas_dtype_needs_early_conversion(array.dtype)\n if isinstance(array.dtype, np.dtype):\n dtype_orig = array.dtype\n else:\n # Set to None to let array.astype work out the best dtype\n dtype_orig = None\n\n if dtype_numeric:\n if (\n dtype_orig is not None\n and hasattr(dtype_orig, \"kind\")\n and dtype_orig.kind == \"O\"\n ):\n # if input is object, convert to float.\n dtype = xp.float64\n else:\n dtype = None\n\n if isinstance(dtype, (list, tuple)):\n if dtype_orig is not None and dtype_orig in dtype:\n # no dtype conversion required\n dtype = None\n else:\n # dtype conversion required. Let's select the first element of the\n # list of accepted types.\n dtype = dtype[0]\n\n if pandas_requires_conversion:\n # pandas dataframe requires conversion earlier to handle extension dtypes with\n # nans\n # Use the original dtype for conversion if dtype is None\n new_dtype = dtype_orig if dtype is None else dtype\n array = array.astype(new_dtype)\n # Since we converted here, we do not need to convert again later\n dtype = None\n\n if dtype is not None and _is_numpy_namespace(xp):\n dtype = np.dtype(dtype)\n\n if force_all_finite not in (True, False, \"allow-nan\"):\n raise ValueError(\n 'force_all_finite should be a bool or \"allow-nan\". 
Got {!r} instead'.format(\n force_all_finite\n )\n )\n\n if dtype is not None and _is_numpy_namespace(xp):\n # convert to dtype object to conform to Array API to be use `xp.isdtype` later\n dtype = np.dtype(dtype)\n\n estimator_name = _check_estimator_name(estimator)\n context = \" by %s\" % estimator_name if estimator is not None else \"\"\n\n # When all dataframe columns are sparse, convert to a sparse array\n if hasattr(array, \"sparse\") and array.ndim > 1:\n with suppress(ImportError):\n from pandas import SparseDtype # noqa: F811\n\n def is_sparse(dtype):\n return isinstance(dtype, SparseDtype)\n\n if array.dtypes.apply(is_sparse).all():\n # DataFrame.sparse only supports `to_coo`\n array = array.sparse.to_coo()\n if array.dtype == np.dtype(\"object\"):\n unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])\n if len(unique_dtypes) > 1:\n raise ValueError(\n \"Pandas DataFrame with mixed sparse extension arrays \"\n \"generated a sparse matrix with object dtype which \"\n \"can not be converted to a scipy sparse matrix.\"\n \"Sparse extension arrays should all have the same \"\n \"numeric type.\"\n )\n\n if sp.issparse(array):\n _ensure_no_complex_data(array)\n array = _ensure_sparse_format(\n array,\n accept_sparse=accept_sparse,\n dtype=dtype,\n copy=copy,\n force_all_finite=force_all_finite,\n accept_large_sparse=accept_large_sparse,\n estimator_name=estimator_name,\n input_name=input_name,\n )\n else:\n # If np.array(..) gives ComplexWarning, then we convert the warning\n # to an error. This is needed because specifying a non complex\n # dtype to the function converts complex to real dtype,\n # thereby passing the test made in the lines following the scope\n # of warnings context manager.\n with warnings.catch_warnings():\n try:\n warnings.simplefilter(\"error\", ComplexWarning)\n if dtype is not None and xp.isdtype(dtype, \"integral\"):\n # Conversion float -> int should not contain NaN or\n # inf (numpy#14412). We cannot use casting='safe' because\n # then conversion float -> int would be disallowed.\n array = _asarray_with_order(array, order=order, xp=xp)\n if xp.isdtype(array.dtype, (\"real floating\", \"complex floating\")):\n _assert_all_finite(\n array,\n allow_nan=False,\n msg_dtype=dtype,\n estimator_name=estimator_name,\n input_name=input_name,\n )\n array = xp.astype(array, dtype, copy=False)\n else:\n array = _asarray_with_order(array, order=order, dtype=dtype, xp=xp)\n except ComplexWarning as complex_warning:\n raise ValueError(\n \"Complex data not supported\\n{}\\n\".format(array)\n ) from complex_warning\n\n # It is possible that the np.array(..) gave no warning. This happens\n # when no dtype conversion happened, for example dtype = None. The\n # result is that np.array(..) 
produces an array of complex dtype\n # and we need to catch and raise exception for such cases.\n _ensure_no_complex_data(array)\n\n if ensure_2d:\n # If input is scalar raise error\n if array.ndim == 0:\n raise ValueError(\n \"Expected 2D array, got scalar array instead:\\narray={}.\\n\"\n \"Reshape your data either using array.reshape(-1, 1) if \"\n \"your data has a single feature or array.reshape(1, -1) \"\n \"if it contains a single sample.\".format(array)\n )\n # If input is 1D raise error\n if array.ndim == 1:\n raise ValueError(\n \"Expected 2D array, got 1D array instead:\\narray={}.\\n\"\n \"Reshape your data either using array.reshape(-1, 1) if \"\n \"your data has a single feature or array.reshape(1, -1) \"\n \"if it contains a single sample.\".format(array)\n )\n\n if dtype_numeric and hasattr(array.dtype, \"kind\") and array.dtype.kind in \"USV\":\n raise ValueError(\n \"dtype='numeric' is not compatible with arrays of bytes/strings.\"\n \"Convert your data to numeric values explicitly instead.\"\n )\n if not allow_nd and array.ndim >= 3:\n raise ValueError(\n \"Found array with dim %d. %s expected <= 2.\"\n % (array.ndim, estimator_name)\n )\n\n if force_all_finite:\n _assert_all_finite(\n array,\n input_name=input_name,\n estimator_name=estimator_name,\n allow_nan=force_all_finite == \"allow-nan\",\n )\n\n if ensure_min_samples > 0:\n n_samples = _num_samples(array)\n if n_samples < ensure_min_samples:\n raise ValueError(\n \"Found array with %d sample(s) (shape=%s) while a\"\n \" minimum of %d is required%s.\"\n % (n_samples, array.shape, ensure_min_samples, context)\n )\n\n if ensure_min_features > 0 and array.ndim == 2:\n n_features = array.shape[1]\n if n_features < ensure_min_features:\n raise ValueError(\n \"Found array with %d feature(s) (shape=%s) while\"\n \" a minimum of %d is required%s.\"\n % (n_features, array.shape, ensure_min_features, context)\n )\n\n if copy:\n if _is_numpy_namespace(xp):\n # only make a copy if `array` and `array_orig` may share memory`\n if np.may_share_memory(array, array_orig):\n array = _asarray_with_order(\n array, dtype=dtype, order=order, copy=True, xp=xp\n )\n else:\n # always make a copy for non-numpy arrays\n array = _asarray_with_order(\n array, dtype=dtype, order=order, copy=True, xp=xp\n )\n\n return array" }, { "identifier": "check_consistent_length", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def check_consistent_length(*arrays):\n \"\"\"Check that all arrays have consistent first dimensions.\n\n Checks whether all objects in arrays have the same shape or length.\n\n Parameters\n ----------\n *arrays : list or tuple of input objects.\n Objects that will be checked for consistent length.\n \"\"\"\n\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of samples: %r\"\n % [int(l) for l in lengths]\n )" }, { "identifier": "column_or_1d", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def column_or_1d(y, *, dtype=None, warn=False):\n \"\"\"Ravel column or 1d numpy array, else raises an error.\n\n Parameters\n ----------\n y : array-like\n Input data.\n\n dtype : data-type, default=None\n Data type for `y`.\n\n .. 
versionadded:: 1.2\n\n warn : bool, default=False\n To control display of warnings.\n\n Returns\n -------\n y : ndarray\n Output data.\n\n Raises\n ------\n ValueError\n If `y` is not a 1D array or a 2D array with a single row or column.\n \"\"\"\n xp, _ = get_namespace(y)\n y = check_array(\n y,\n ensure_2d=False,\n dtype=dtype,\n input_name=\"y\",\n force_all_finite=False,\n ensure_min_samples=0,\n )\n\n shape = y.shape\n if len(shape) == 1:\n return _asarray_with_order(xp.reshape(y, (-1,)), order=\"C\", xp=xp)\n if len(shape) == 2 and shape[1] == 1:\n if warn:\n warnings.warn(\n (\n \"A column-vector y was passed when a 1d array was\"\n \" expected. Please change the shape of y to \"\n \"(n_samples, ), for example using ravel().\"\n ),\n DataConversionWarning,\n stacklevel=2,\n )\n return _asarray_with_order(xp.reshape(y, (-1,)), order=\"C\", xp=xp)\n\n raise ValueError(\n \"y should be a 1d array, got an array of shape {} instead.\".format(shape)\n )" }, { "identifier": "_encode", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_encode.py", "snippet": "def _encode(values, *, uniques, check_unknown=True):\n \"\"\"Helper function to encode values into [0, n_uniques - 1].\n\n Uses pure python method for object dtype, and numpy method for\n all other dtypes.\n The numpy method has the limitation that the `uniques` need to\n be sorted. Importantly, this is not checked but assumed to already be\n the case. The calling method needs to ensure this for all non-object\n values.\n\n Parameters\n ----------\n values : ndarray\n Values to encode.\n uniques : ndarray\n The unique values in `values`. If the dtype is not object, then\n `uniques` needs to be sorted.\n check_unknown : bool, default=True\n If True, check for values in `values` that are not in `unique`\n and raise an error. This is ignored for object dtype, and treated as\n True in this case. This parameter is useful for\n _BaseEncoder._transform() to avoid calling _check_unknown()\n twice.\n\n Returns\n -------\n encoded : ndarray\n Encoded values\n \"\"\"\n if values.dtype.kind in \"OUS\":\n try:\n return _map_to_integer(values, uniques)\n except KeyError as e:\n raise ValueError(f\"y contains previously unseen labels: {str(e)}\")\n else:\n if check_unknown:\n diff = _check_unknown(values, uniques)\n if diff:\n raise ValueError(f\"y contains previously unseen labels: {str(diff)}\")\n return np.searchsorted(uniques, values)" }, { "identifier": "_unique", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_encode.py", "snippet": "def _unique(values, *, return_inverse=False, return_counts=False):\n \"\"\"Helper function to find unique values with support for python objects.\n\n Uses pure python method for object dtype, and numpy method for\n all other dtypes.\n\n Parameters\n ----------\n values : ndarray\n Values to check for unknowns.\n\n return_inverse : bool, default=False\n If True, also return the indices of the unique values.\n\n return_counts : bool, default=False\n If True, also return the number of times each unique item appears in\n values.\n\n Returns\n -------\n unique : ndarray\n The sorted unique values.\n\n unique_inverse : ndarray\n The indices to reconstruct the original array from the unique array.\n Only provided if `return_inverse` is True.\n\n unique_counts : ndarray\n The number of times each of the unique values comes up in the original\n array. 
Only provided if `return_counts` is True.\n \"\"\"\n if values.dtype == object:\n return _unique_python(\n values, return_inverse=return_inverse, return_counts=return_counts\n )\n # numerical\n return _unique_np(\n values, return_inverse=return_inverse, return_counts=return_counts\n )" }, { "identifier": "Interval", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class Interval(_Constraint):\n \"\"\"Constraint representing a typed interval.\n\n Parameters\n ----------\n type : {numbers.Integral, numbers.Real, RealNotInt}\n The set of numbers in which to set the interval.\n\n If RealNotInt, only reals that don't have the integer type\n are allowed. For example 1.0 is allowed but 1 is not.\n\n left : float or int or None\n The left bound of the interval. None means left bound is -∞.\n\n right : float, int or None\n The right bound of the interval. None means right bound is +∞.\n\n closed : {\"left\", \"right\", \"both\", \"neither\"}\n Whether the interval is open or closed. Possible choices are:\n\n - `\"left\"`: the interval is closed on the left and open on the right.\n It is equivalent to the interval `[ left, right )`.\n - `\"right\"`: the interval is closed on the right and open on the left.\n It is equivalent to the interval `( left, right ]`.\n - `\"both\"`: the interval is closed.\n It is equivalent to the interval `[ left, right ]`.\n - `\"neither\"`: the interval is open.\n It is equivalent to the interval `( left, right )`.\n\n Notes\n -----\n Setting a bound to `None` and setting the interval closed is valid. For instance,\n strictly speaking, `Interval(Real, 0, None, closed=\"both\")` corresponds to\n `[0, +∞) U {+∞}`.\n \"\"\"\n\n def __init__(self, type, left, right, *, closed):\n super().__init__()\n self.type = type\n self.left = left\n self.right = right\n self.closed = closed\n\n self._check_params()\n\n def _check_params(self):\n if self.type not in (Integral, Real, RealNotInt):\n raise ValueError(\n \"type must be either numbers.Integral, numbers.Real or RealNotInt.\"\n f\" Got {self.type} instead.\"\n )\n\n if self.closed not in (\"left\", \"right\", \"both\", \"neither\"):\n raise ValueError(\n \"closed must be either 'left', 'right', 'both' or 'neither'. \"\n f\"Got {self.closed} instead.\"\n )\n\n if self.type is Integral:\n suffix = \"for an interval over the integers.\"\n if self.left is not None and not isinstance(self.left, Integral):\n raise TypeError(f\"Expecting left to be an int {suffix}\")\n if self.right is not None and not isinstance(self.right, Integral):\n raise TypeError(f\"Expecting right to be an int {suffix}\")\n if self.left is None and self.closed in (\"left\", \"both\"):\n raise ValueError(\n f\"left can't be None when closed == {self.closed} {suffix}\"\n )\n if self.right is None and self.closed in (\"right\", \"both\"):\n raise ValueError(\n f\"right can't be None when closed == {self.closed} {suffix}\"\n )\n else:\n if self.left is not None and not isinstance(self.left, Real):\n raise TypeError(\"Expecting left to be a real number.\")\n if self.right is not None and not isinstance(self.right, Real):\n raise TypeError(\"Expecting right to be a real number.\")\n\n if self.right is not None and self.left is not None and self.right <= self.left:\n raise ValueError(\n f\"right can't be less than left. 
Got left={self.left} and \"\n f\"right={self.right}\"\n )\n\n def __contains__(self, val):\n if np.isnan(val):\n return False\n\n left_cmp = operator.lt if self.closed in (\"left\", \"both\") else operator.le\n right_cmp = operator.gt if self.closed in (\"right\", \"both\") else operator.ge\n\n left = -np.inf if self.left is None else self.left\n right = np.inf if self.right is None else self.right\n\n if left_cmp(val, left):\n return False\n if right_cmp(val, right):\n return False\n return True\n\n def is_satisfied_by(self, val):\n if not isinstance(val, self.type):\n return False\n\n return val in self\n\n def __str__(self):\n type_str = \"an int\" if self.type is Integral else \"a float\"\n left_bracket = \"[\" if self.closed in (\"left\", \"both\") else \"(\"\n left_bound = \"-inf\" if self.left is None else self.left\n right_bound = \"inf\" if self.right is None else self.right\n right_bracket = \"]\" if self.closed in (\"right\", \"both\") else \")\"\n\n # better repr if the bounds were given as integers\n if not self.type == Integral and isinstance(self.left, Real):\n left_bound = float(left_bound)\n if not self.type == Integral and isinstance(self.right, Real):\n right_bound = float(right_bound)\n\n return (\n f\"{type_str} in the range \"\n f\"{left_bracket}{left_bound}, {right_bound}{right_bracket}\"\n )" }, { "identifier": "StrOptions", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class StrOptions(Options):\n \"\"\"Constraint representing a finite set of strings.\n\n Parameters\n ----------\n options : set of str\n The set of valid strings.\n\n deprecated : set of str or None, default=None\n A subset of the `options` to mark as deprecated in the string\n representation of the constraint.\n \"\"\"\n\n def __init__(self, options, *, deprecated=None):\n super().__init__(type=str, options=options, deprecated=deprecated)" }, { "identifier": "validate_params", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "def validate_params(parameter_constraints, *, prefer_skip_nested_validation):\n \"\"\"Decorator to validate types and values of functions and methods.\n\n Parameters\n ----------\n parameter_constraints : dict\n A dictionary `param_name: list of constraints`. See the docstring of\n `validate_parameter_constraints` for a description of the accepted constraints.\n\n Note that the *args and **kwargs parameters are not validated and must not be\n present in the parameter_constraints dictionary.\n\n prefer_skip_nested_validation : bool\n If True, the validation of parameters of inner estimators or functions\n called by the decorated function will be skipped.\n\n This is useful to avoid validating many times the parameters passed by the\n user from the public facing API. 
It's also useful to avoid validating\n parameters that we pass internally to inner functions that are guaranteed to\n be valid by the test suite.\n\n It should be set to True for most functions, except for those that receive\n non-validated objects as parameters or that are just wrappers around classes\n because they only perform a partial validation.\n\n Returns\n -------\n decorated_function : function or method\n The decorated function.\n \"\"\"\n\n def decorator(func):\n # The dict of parameter constraints is set as an attribute of the function\n # to make it possible to dynamically introspect the constraints for\n # automatic testing.\n setattr(func, \"_skl_parameter_constraints\", parameter_constraints)\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n global_skip_validation = get_config()[\"skip_parameter_validation\"]\n if global_skip_validation:\n return func(*args, **kwargs)\n\n func_sig = signature(func)\n\n # Map *args/**kwargs to the function signature\n params = func_sig.bind(*args, **kwargs)\n params.apply_defaults()\n\n # ignore self/cls and positional/keyword markers\n to_ignore = [\n p.name\n for p in func_sig.parameters.values()\n if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)\n ]\n to_ignore += [\"self\", \"cls\"]\n params = {k: v for k, v in params.arguments.items() if k not in to_ignore}\n\n validate_parameter_constraints(\n parameter_constraints, params, caller_name=func.__qualname__\n )\n\n try:\n with config_context(\n skip_parameter_validation=(\n prefer_skip_nested_validation or global_skip_validation\n )\n ):\n return func(*args, **kwargs)\n except InvalidParameterError as e:\n # When the function is just a wrapper around an estimator, we allow\n # the function to delegate validation to the estimator, but we replace\n # the name of the estimator by the name of the function in the error\n # message to avoid confusion.\n msg = re.sub(\n r\"parameter of \\w+ must be\",\n f\"parameter of {func.__qualname__} must be\",\n str(e),\n )\n raise InvalidParameterError(msg) from e\n\n return wrapper\n\n return decorator" }, { "identifier": "stable_cumsum", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/extmath.py", "snippet": "def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):\n \"\"\"Use high precision for cumsum and check that final value matches sum.\n\n Warns if the final cumulative sum does not match the sum (up to the chosen\n tolerance).\n\n Parameters\n ----------\n arr : array-like\n To be cumulatively summed as flat.\n axis : int, default=None\n Axis along which the cumulative sum is computed.\n The default (None) is to compute the cumsum over the flattened array.\n rtol : float, default=1e-05\n Relative tolerance, see ``np.allclose``.\n atol : float, default=1e-08\n Absolute tolerance, see ``np.allclose``.\n\n Returns\n -------\n out : ndarray\n Array with the cumulative sums along the chosen axis.\n \"\"\"\n out = np.cumsum(arr, axis=axis, dtype=np.float64)\n expected = np.sum(arr, axis=axis, dtype=np.float64)\n if not np.all(\n np.isclose(\n out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True\n )\n ):\n warnings.warn(\n (\n \"cumsum was found to be unstable: \"\n \"its last element does not correspond to sum\"\n ),\n RuntimeWarning,\n )\n return out" }, { "identifier": "trapezoid", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/fixes.py", "snippet": "def _object_dtype_isnan(X):\ndef _percentile(a, q, *, method=\"linear\", **kwargs):\ndef _get_threadpool_controller():\ndef 
threadpool_limits(limits=None, user_api=None):\ndef threadpool_info():\ndef delayed(function):\ndef _mode(a, axis=0):\n def _sparse_linalg_cg(A, b, **kwargs):\ndef _open_text(data_module, data_file_name):\ndef _open_binary(data_module, data_file_name):\ndef _read_text(descr_module, descr_file_name):\ndef _path(data_module, data_file_name):\ndef _is_resource(data_module, data_file_name):\ndef _contents(data_module):" }, { "identifier": "type_of_target", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/multiclass.py", "snippet": "def type_of_target(y, input_name=\"\"):\n \"\"\"Determine the type of data indicated by the target.\n\n Note that this type is the most specific type that can be inferred.\n For example:\n\n * ``binary`` is more specific but compatible with ``multiclass``.\n * ``multiclass`` of integers is more specific but compatible with\n ``continuous``.\n * ``multilabel-indicator`` is more specific but compatible with\n ``multiclass-multioutput``.\n\n Parameters\n ----------\n y : {array-like, sparse matrix}\n Target values. If a sparse matrix, `y` is expected to be a\n CSR/CSC matrix.\n\n input_name : str, default=\"\"\n The data name used to construct the error message.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n target_type : str\n One of:\n\n * 'continuous': `y` is an array-like of floats that are not all\n integers, and is 1d or a column vector.\n * 'continuous-multioutput': `y` is a 2d array of floats that are\n not all integers, and both dimensions are of size > 1.\n * 'binary': `y` contains <= 2 discrete values and is 1d or a column\n vector.\n * 'multiclass': `y` contains more than two discrete values, is not a\n sequence of sequences, and is 1d or a column vector.\n * 'multiclass-multioutput': `y` is a 2d array that contains more\n than two discrete values, is not a sequence of sequences, and both\n dimensions are of size > 1.\n * 'multilabel-indicator': `y` is a label indicator matrix, an array\n of two dimensions with at least two columns, and at most 2 unique\n values.\n * 'unknown': `y` is array-like but none of the above, such as a 3d\n array, sequence of sequences, or an array of non-sequence objects.\n\n Examples\n --------\n >>> from sklearn.utils.multiclass import type_of_target\n >>> import numpy as np\n >>> type_of_target([0.1, 0.6])\n 'continuous'\n >>> type_of_target([1, -1, -1, 1])\n 'binary'\n >>> type_of_target(['a', 'b', 'a'])\n 'binary'\n >>> type_of_target([1.0, 2.0])\n 'binary'\n >>> type_of_target([1, 0, 2])\n 'multiclass'\n >>> type_of_target([1.0, 0.0, 3.0])\n 'multiclass'\n >>> type_of_target(['a', 'b', 'c'])\n 'multiclass'\n >>> type_of_target(np.array([[1, 2], [3, 1]]))\n 'multiclass-multioutput'\n >>> type_of_target([[1, 2]])\n 'multilabel-indicator'\n >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))\n 'continuous-multioutput'\n >>> type_of_target(np.array([[0, 1], [1, 1]]))\n 'multilabel-indicator'\n \"\"\"\n xp, is_array_api_compliant = get_namespace(y)\n valid = (\n (isinstance(y, Sequence) or issparse(y) or hasattr(y, \"__array__\"))\n and not isinstance(y, str)\n or is_array_api_compliant\n )\n\n if not valid:\n raise ValueError(\n \"Expected array-like (array or non-string sequence), got %r\" % y\n )\n\n sparse_pandas = y.__class__.__name__ in [\"SparseSeries\", \"SparseArray\"]\n if sparse_pandas:\n raise ValueError(\"y cannot be class 'SparseSeries' or 'SparseArray'\")\n\n if is_multilabel(y):\n return \"multilabel-indicator\"\n\n # DeprecationWarning will be replaced by ValueError, see NEP 34\n # 
https://numpy.org/neps/nep-0034-infer-dtype-is-object.html\n # We therefore catch both deprecation (NumPy < 1.24) warning and\n # value error (NumPy >= 1.24).\n check_y_kwargs = dict(\n accept_sparse=True,\n allow_nd=True,\n force_all_finite=False,\n ensure_2d=False,\n ensure_min_samples=0,\n ensure_min_features=0,\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", VisibleDeprecationWarning)\n if not issparse(y):\n try:\n y = check_array(y, dtype=None, **check_y_kwargs)\n except (VisibleDeprecationWarning, ValueError) as e:\n if str(e).startswith(\"Complex data not supported\"):\n raise\n\n # dtype=object should be provided explicitly for ragged arrays,\n # see NEP 34\n y = check_array(y, dtype=object, **check_y_kwargs)\n\n # The old sequence of sequences format\n try:\n if (\n not hasattr(y[0], \"__array__\")\n and isinstance(y[0], Sequence)\n and not isinstance(y[0], str)\n ):\n raise ValueError(\n \"You appear to be using a legacy multi-label data\"\n \" representation. Sequence of sequences are no\"\n \" longer supported; use a binary array or sparse\"\n \" matrix instead - the MultiLabelBinarizer\"\n \" transformer can convert to this format.\"\n )\n except IndexError:\n pass\n\n # Invalid inputs\n if y.ndim not in (1, 2):\n # Number of dimension greater than 2: [[[1, 2]]]\n return \"unknown\"\n if not min(y.shape):\n # Empty ndarray: []/[[]]\n if y.ndim == 1:\n # 1-D empty array: []\n return \"binary\" # []\n # 2-D empty array: [[]]\n return \"unknown\"\n if not issparse(y) and y.dtype == object and not isinstance(y.flat[0], str):\n # [obj_1] and not [\"label_1\"]\n return \"unknown\"\n\n # Check if multioutput\n if y.ndim == 2 and y.shape[1] > 1:\n suffix = \"-multioutput\" # [[1, 2], [1, 2]]\n else:\n suffix = \"\" # [1, 2, 3] or [[1], [2], [3]]\n\n # Check float and contains non-integer float values\n if xp.isdtype(y.dtype, \"real floating\"):\n # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]\n data = y.data if issparse(y) else y\n if xp.any(data != xp.astype(data, int)):\n _assert_all_finite(data, input_name=input_name)\n return \"continuous\" + suffix\n\n # Check multiclass\n first_row = y[0] if not issparse(y) else y.getrow(0).data\n if xp.unique_values(y).shape[0] > 2 or (y.ndim == 2 and len(first_row) > 1):\n # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]\n return \"multiclass\" + suffix\n else:\n return \"binary\" # [1, 2] or [[\"a\"], [\"b\"]]" }, { "identifier": "count_nonzero", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/sparsefuncs.py", "snippet": "def count_nonzero(X, axis=None, sample_weight=None):\n \"\"\"A variant of X.getnnz() with extension to weighting on axis 0.\n\n Useful in efficiently calculating multilabel metrics.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_labels)\n Input data. It should be of CSR format.\n\n axis : {0, 1}, default=None\n The axis on which the data is aggregated.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Weight for each row of X.\n\n Returns\n -------\n nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,)\n Number of non-zero values in the array along a given axis. 
Otherwise,\n the total number of non-zero values in the array is returned.\n \"\"\"\n if axis == -1:\n axis = 1\n elif axis == -2:\n axis = 0\n elif X.format != \"csr\":\n raise TypeError(\"Expected CSR sparse format, got {0}\".format(X.format))\n\n # We rely here on the fact that np.diff(Y.indptr) for a CSR\n # will return the number of nonzero entries in each row.\n # A bincount over Y.indices will return the number of nonzeros\n # in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.\n if axis is None:\n if sample_weight is None:\n return X.nnz\n else:\n return np.dot(np.diff(X.indptr), sample_weight)\n elif axis == 1:\n out = np.diff(X.indptr)\n if sample_weight is None:\n # astype here is for consistency with axis=0 dtype\n return out.astype(\"intp\")\n return out * sample_weight\n elif axis == 0:\n if sample_weight is None:\n return np.bincount(X.indices, minlength=X.shape[1])\n else:\n weights = np.repeat(sample_weight, np.diff(X.indptr))\n return np.bincount(X.indices, minlength=X.shape[1], weights=weights)\n else:\n raise ValueError(\"Unsupported axis: {0}\".format(axis))" }, { "identifier": "_check_pos_label_consistency", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def _check_pos_label_consistency(pos_label, y_true):\n \"\"\"Check if `pos_label` need to be specified or not.\n\n In binary classification, we fix `pos_label=1` if the labels are in the set\n {-1, 1} or {0, 1}. Otherwise, we raise an error asking to specify the\n `pos_label` parameters.\n\n Parameters\n ----------\n pos_label : int, float, bool, str or None\n The positive label.\n y_true : ndarray of shape (n_samples,)\n The target vector.\n\n Returns\n -------\n pos_label : int, float, bool or str\n If `pos_label` can be inferred, it will be returned.\n\n Raises\n ------\n ValueError\n In the case that `y_true` does not have label in {-1, 1} or {0, 1},\n it will raise a `ValueError`.\n \"\"\"\n # ensure binary classification if pos_label is not specified\n # classes.dtype.kind in ('O', 'U', 'S') is required to avoid\n # triggering a FutureWarning by calling np.array_equal(a, b)\n # when elements in the two arrays are not comparable.\n classes = np.unique(y_true)\n if pos_label is None and (\n classes.dtype.kind in \"OUS\"\n or not (\n np.array_equal(classes, [0, 1])\n or np.array_equal(classes, [-1, 1])\n or np.array_equal(classes, [0])\n or np.array_equal(classes, [-1])\n or np.array_equal(classes, [1])\n )\n ):\n classes_repr = \", \".join([repr(c) for c in classes.tolist()])\n raise ValueError(\n f\"y_true takes value in {{{classes_repr}}} and pos_label is not \"\n \"specified: either make y_true take value in {0, 1} or \"\n \"{-1, 1} or pass pos_label explicitly.\"\n )\n elif pos_label is None:\n pos_label = 1\n\n return pos_label" }, { "identifier": "_check_sample_weight", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def _check_sample_weight(\n sample_weight, X, dtype=None, copy=False, only_non_negative=False\n):\n \"\"\"Validate sample weights.\n\n Note that passing sample_weight=None will output an array of ones.\n Therefore, in some cases, you may want to protect the call with:\n if sample_weight is not None:\n sample_weight = _check_sample_weight(...)\n\n Parameters\n ----------\n sample_weight : {ndarray, Number or None}, shape (n_samples,)\n Input sample weights.\n\n X : {ndarray, list, sparse matrix}\n Input data.\n\n only_non_negative : bool, default=False,\n Whether or not the weights are expected to be 
non-negative.\n\n .. versionadded:: 1.0\n\n dtype : dtype, default=None\n dtype of the validated `sample_weight`.\n If None, and the input `sample_weight` is an array, the dtype of the\n input is preserved; otherwise an array with the default numpy dtype\n is be allocated. If `dtype` is not one of `float32`, `float64`,\n `None`, the output will be of dtype `float64`.\n\n copy : bool, default=False\n If True, a copy of sample_weight will be created.\n\n Returns\n -------\n sample_weight : ndarray of shape (n_samples,)\n Validated sample weight. It is guaranteed to be \"C\" contiguous.\n \"\"\"\n n_samples = _num_samples(X)\n\n if dtype is not None and dtype not in [np.float32, np.float64]:\n dtype = np.float64\n\n if sample_weight is None:\n sample_weight = np.ones(n_samples, dtype=dtype)\n elif isinstance(sample_weight, numbers.Number):\n sample_weight = np.full(n_samples, sample_weight, dtype=dtype)\n else:\n if dtype is None:\n dtype = [np.float64, np.float32]\n sample_weight = check_array(\n sample_weight,\n accept_sparse=False,\n ensure_2d=False,\n dtype=dtype,\n order=\"C\",\n copy=copy,\n input_name=\"sample_weight\",\n )\n if sample_weight.ndim != 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n if sample_weight.shape != (n_samples,):\n raise ValueError(\n \"sample_weight.shape == {}, expected {}!\".format(\n sample_weight.shape, (n_samples,)\n )\n )\n\n if only_non_negative:\n check_non_negative(sample_weight, \"`sample_weight`\")\n\n return sample_weight" }, { "identifier": "_average_binary_score", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/metrics/_base.py", "snippet": "def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None):\n \"\"\"Average a binary metric for multilabel classification.\n\n Parameters\n ----------\n y_true : array, shape = [n_samples] or [n_samples, n_classes]\n True binary labels in binary label indicators.\n\n y_score : array, shape = [n_samples] or [n_samples, n_classes]\n Target scores, can either be probability estimates of the positive\n class, confidence values, or binary decisions.\n\n average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'\n If ``None``, the scores for each class are returned. Otherwise,\n this determines the type of averaging performed on the data:\n\n ``'micro'``:\n Calculate metrics globally by considering each element of the label\n indicator matrix as a label.\n ``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. 
This does not take label imbalance into account.\n ``'weighted'``:\n Calculate metrics for each label, and find their average, weighted\n by support (the number of true instances for each label).\n ``'samples'``:\n Calculate metrics for each instance, and find their average.\n\n Will be ignored when ``y_true`` is binary.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n binary_metric : callable, returns shape [n_classes]\n The binary metric function to use.\n\n Returns\n -------\n score : float or array of shape [n_classes]\n If not ``None``, average the score, else return the score for each\n classes.\n\n \"\"\"\n average_options = (None, \"micro\", \"macro\", \"weighted\", \"samples\")\n if average not in average_options:\n raise ValueError(\"average has to be one of {0}\".format(average_options))\n\n y_type = type_of_target(y_true)\n if y_type not in (\"binary\", \"multilabel-indicator\"):\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n if y_type == \"binary\":\n return binary_metric(y_true, y_score, sample_weight=sample_weight)\n\n check_consistent_length(y_true, y_score, sample_weight)\n y_true = check_array(y_true)\n y_score = check_array(y_score)\n\n not_average_axis = 1\n score_weight = sample_weight\n average_weight = None\n\n if average == \"micro\":\n if score_weight is not None:\n score_weight = np.repeat(score_weight, y_true.shape[1])\n y_true = y_true.ravel()\n y_score = y_score.ravel()\n\n elif average == \"weighted\":\n if score_weight is not None:\n average_weight = np.sum(\n np.multiply(y_true, np.reshape(score_weight, (-1, 1))), axis=0\n )\n else:\n average_weight = np.sum(y_true, axis=0)\n if np.isclose(average_weight.sum(), 0.0):\n return 0\n\n elif average == \"samples\":\n # swap average_weight <-> score_weight\n average_weight = score_weight\n score_weight = None\n not_average_axis = 0\n\n if y_true.ndim == 1:\n y_true = y_true.reshape((-1, 1))\n\n if y_score.ndim == 1:\n y_score = y_score.reshape((-1, 1))\n\n n_classes = y_score.shape[not_average_axis]\n score = np.zeros((n_classes,))\n for c in range(n_classes):\n y_true_c = y_true.take([c], axis=not_average_axis).ravel()\n y_score_c = y_score.take([c], axis=not_average_axis).ravel()\n score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight)\n\n # Average the results\n if average is not None:\n if average_weight is not None:\n # Scores with 0 weights are forced to be 0, preventing the average\n # score from being affected by 0-weighted NaN elements.\n average_weight = np.asarray(average_weight)\n score[average_weight == 0] = 0\n return np.average(score, weights=average_weight)\n else:\n return score" }, { "identifier": "_average_multiclass_ovo_score", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/metrics/_base.py", "snippet": "def _average_multiclass_ovo_score(binary_metric, y_true, y_score, average=\"macro\"):\n \"\"\"Average one-versus-one scores for multiclass classification.\n\n Uses the binary metric for one-vs-one multiclass classification,\n where the score is computed according to the Hand & Till (2001) algorithm.\n\n Parameters\n ----------\n binary_metric : callable\n The binary metric function to use that accepts the following as input:\n y_true_target : array, shape = [n_samples_target]\n Some sub-array of y_true for a pair of classes designated\n positive and negative in the one-vs-one scheme.\n y_score_target : array, shape = [n_samples_target]\n Scores corresponding to the probability estimates\n of a sample 
belonging to the designated positive class label\n\n y_true : array-like of shape (n_samples,)\n True multiclass labels.\n\n y_score : array-like of shape (n_samples, n_classes)\n Target scores corresponding to probability estimates of a sample\n belonging to a particular class.\n\n average : {'macro', 'weighted'}, default='macro'\n Determines the type of averaging performed on the pairwise binary\n metric scores:\n ``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. This does not take label imbalance into account. Classes\n are assumed to be uniformly distributed.\n ``'weighted'``:\n Calculate metrics for each label, taking into account the\n prevalence of the classes.\n\n Returns\n -------\n score : float\n Average of the pairwise binary metric scores.\n \"\"\"\n check_consistent_length(y_true, y_score)\n\n y_true_unique = np.unique(y_true)\n n_classes = y_true_unique.shape[0]\n n_pairs = n_classes * (n_classes - 1) // 2\n pair_scores = np.empty(n_pairs)\n\n is_weighted = average == \"weighted\"\n prevalence = np.empty(n_pairs) if is_weighted else None\n\n # Compute scores treating a as positive class and b as negative class,\n # then b as positive class and a as negative class\n for ix, (a, b) in enumerate(combinations(y_true_unique, 2)):\n a_mask = y_true == a\n b_mask = y_true == b\n ab_mask = np.logical_or(a_mask, b_mask)\n\n if is_weighted:\n prevalence[ix] = np.average(ab_mask)\n\n a_true = a_mask[ab_mask]\n b_true = b_mask[ab_mask]\n\n a_true_score = binary_metric(a_true, y_score[ab_mask, a])\n b_true_score = binary_metric(b_true, y_score[ab_mask, b])\n pair_scores[ix] = (a_true_score + b_true_score) / 2\n\n return np.average(pair_scores, weights=prevalence)" } ]
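The context snippets above document scikit-learn's private parameter-validation helpers. As a rough illustration of how `Interval`, `StrOptions` and `validate_params` fit together (a minimal sketch only, assuming the private `sklearn.utils._param_validation` module behaves as its docstrings describe; `clipped_power` is a made-up function for illustration):

from numbers import Integral, Real
from sklearn.utils._param_validation import Interval, StrOptions, validate_params

@validate_params(
    {
        # a real number in [0, +inf)
        "x": [Interval(Real, 0, None, closed="left")],
        # an integer in [1, +inf)
        "n": [Interval(Integral, 1, None, closed="left")],
        # one of two allowed strings
        "mode": [StrOptions({"exact", "floor"})],
    },
    prefer_skip_nested_validation=True,
)
def clipped_power(x, n, mode="exact"):
    result = x**n
    return int(result) if mode == "floor" else result

clipped_power(2.5, 3)       # 15.625 -- passes validation
# clipped_power(-1.0, 2)    # would raise InvalidParameterError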
import warnings

import numpy as np

from functools import partial
from numbers import Integral, Real

from scipy.sparse import csr_matrix, issparse
from scipy.stats import rankdata

from ..exceptions import UndefinedMetricWarning
from ..preprocessing import label_binarize
from ..utils import (
    assert_all_finite,
    check_array,
    check_consistent_length,
    column_or_1d,
)
from ..utils._encode import _encode, _unique
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.extmath import stable_cumsum
from ..utils.fixes import trapezoid
from ..utils.multiclass import type_of_target
from ..utils.sparsefuncs import count_nonzero
from ..utils.validation import _check_pos_label_consistency, _check_sample_weight

from ._base import _average_binary_score, _average_multiclass_ovo_score
token_num: 17,755
"Target scores need to be probabilities for multiclass " "roc_auc, i.e. they should sum up to 1.0 over classes" ) # validation for multiclass parameter specifications average_options = ("macro", "weighted", None) if multi_class == "ovr": average_options = ("micro",) + average_options if average not in average_options: raise ValueError( "average must be one of {0} for multiclass problems".format(average_options) ) multiclass_options = ("ovo", "ovr") if multi_class not in multiclass_options: raise ValueError( "multi_class='{0}' is not supported " "for multiclass ROC AUC, multi_class must be " "in {1}".format(multi_class, multiclass_options) ) if average is None and multi_class == "ovo": raise NotImplementedError( "average=None is not implemented for multi_class='ovo'." ) if labels is not None: labels = column_or_1d(labels) classes = _unique(labels) if len(classes) != len(labels): raise ValueError("Parameter 'labels' must be unique") if not np.array_equal(classes, labels): raise ValueError("Parameter 'labels' must be ordered") if len(classes) != y_score.shape[1]: raise ValueError( "Number of given labels, {0}, not equal to the number " "of columns in 'y_score', {1}".format(len(classes), y_score.shape[1]) ) if len(np.setdiff1d(y_true, classes)): raise ValueError("'y_true' contains labels not in parameter 'labels'") else: classes = _unique(y_true) if len(classes) != y_score.shape[1]: raise ValueError( "Number of classes in y_true not equal to the number of " "columns in 'y_score'" ) if multi_class == "ovo": if sample_weight is not None: raise ValueError( "sample_weight is not supported " "for multiclass one-vs-one ROC AUC, " "'sample_weight' must be None in this case." ) y_true_encoded = _encode(y_true, uniques=classes) # Hand & Till (2001) implementation (ovo) return _average_multiclass_ovo_score( _binary_roc_auc_score, y_true_encoded, y_score, average=average ) else: # ovr is same as multi-label y_true_multilabel = label_binarize(y_true, classes=classes) return _average_binary_score( _binary_roc_auc_score, y_true_multilabel, y_score, average, sample_weight=sample_weight, ) def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): """Calculate true and false positives per binary classification threshold. Parameters ---------- y_true : ndarray of shape (n_samples,) True targets of binary classification. y_score : ndarray of shape (n_samples,) Estimated probabilities or output of a decision function. pos_label : int, float, bool or str, default=None The label of the positive class. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fps : ndarray of shape (n_thresholds,) A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : ndarray of shape (n_thresholds,) An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : ndarray of shape (n_thresholds,) Decreasing score values. 
""" # Check to make sure y_true is valid y_type = type_of_target(y_true, input_name="y_true") if not (y_type == "binary" or (y_type == "multiclass" and pos_label is not None)): raise ValueError("{0} format is not supported".format(y_type)) check_consistent_length(y_true, y_score, sample_weight) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) assert_all_finite(y_true) assert_all_finite(y_score) # Filter out zero-weighted samples, as they should not impact the result if sample_weight is not None: sample_weight = column_or_1d(sample_weight)
"""Metrics to assess performance on classification task given scores. Functions named as ``*_score`` return a scalar value to maximize: the higher the better. Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better. """ # Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Olivier Grisel <[email protected]> # Arnaud Joly <[email protected]> # Jochen Wersdorfer <[email protected]> # Lars Buitinck # Joel Nothman <[email protected]> # Noel Dawe <[email protected]> # Michal Karbownik <[email protected]> # License: BSD 3 clause @validate_params( {"x": ["array-like"], "y": ["array-like"]}, prefer_skip_nested_validation=True, ) def auc(x, y): """Compute Area Under the Curve (AUC) using the trapezoidal rule. This is a general function, given points on a curve. For computing the area under the ROC-curve, see :func:`roc_auc_score`. For an alternative way to summarize a precision-recall curve, see :func:`average_precision_score`. Parameters ---------- x : array-like of shape (n,) X coordinates. These must be either monotonic increasing or monotonic decreasing. y : array-like of shape (n,) Y coordinates. Returns ------- auc : float Area Under the Curve. See Also -------- roc_auc_score : Compute the area under the ROC curve. average_precision_score : Compute average precision from prediction scores. precision_recall_curve : Compute precision-recall pairs for different probability thresholds. Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> pred = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2) >>> metrics.auc(fpr, tpr) 0.75 """ check_consistent_length(x, y) x = column_or_1d(x) y = column_or_1d(y) if x.shape[0] < 2: raise ValueError( "At least 2 points are needed to compute area under curve, but x.shape = %s" % x.shape ) direction = 1 dx = np.diff(x) if np.any(dx < 0): if np.all(dx <= 0): direction = -1 else: raise ValueError("x is neither increasing nor decreasing : {}.".format(x)) area = direction * trapezoid(y, x) if isinstance(area, np.memmap): # Reductions such as .sum used internally in trapezoid do not return a # scalar by default for numpy.memmap instances contrary to # regular numpy.ndarray instances. area = area.dtype.type(area) return area @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "average": [StrOptions({"micro", "samples", "weighted", "macro"}), None], "pos_label": [Real, str, "boolean"], "sample_weight": ["array-like", None], }, prefer_skip_nested_validation=True, ) def average_precision_score( y_true, y_score, *, average="macro", pos_label=1, sample_weight=None ): """Compute average precision (AP) from prediction scores. AP summarizes a precision-recall curve as the weighted mean of precisions achieved at each threshold, with the increase in recall from the previous threshold used as the weight: .. math:: \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n where :math:`P_n` and :math:`R_n` are the precision and recall at the nth threshold [1]_. This implementation is not interpolated and is different from computing the area under the precision-recall curve with the trapezoidal rule, which uses linear interpolation and can be too optimistic. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_classes) True binary labels or binary label indicators. 
y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by :term:`decision_function` on some classifiers). average : {'micro', 'samples', 'weighted', 'macro'} or None, \ default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. pos_label : int, float, bool or str, default=1 The label of the positive class. Only applied to binary ``y_true``. For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- average_precision : float Average precision score. See Also -------- roc_auc_score : Compute the area under the ROC curve. precision_recall_curve : Compute precision-recall pairs for different probability thresholds. Notes ----- .. versionchanged:: 0.19 Instead of linearly interpolating between operating points, precisions are weighted by the change in recall since the last operating point. References ---------- .. [1] `Wikipedia entry for the Average precision <https://en.wikipedia.org/w/index.php?title=Information_retrieval& oldid=793358396#Average_precision>`_ Examples -------- >>> import numpy as np >>> from sklearn.metrics import average_precision_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> average_precision_score(y_true, y_scores) 0.83... >>> y_true = np.array([0, 0, 1, 1, 2, 2]) >>> y_scores = np.array([ ... [0.7, 0.2, 0.1], ... [0.4, 0.3, 0.3], ... [0.1, 0.8, 0.1], ... [0.2, 0.3, 0.5], ... [0.4, 0.4, 0.2], ... [0.1, 0.2, 0.7], ... ]) >>> average_precision_score(y_true, y_scores) 0.77... """ def _binary_uninterpolated_average_precision( y_true, y_score, pos_label=1, sample_weight=None ): precision, recall, _ = precision_recall_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) # Return the step function integral # The following works because the last entry of precision is # guaranteed to be 1, as returned by precision_recall_curve return -np.sum(np.diff(recall) * np.array(precision)[:-1]) y_type = type_of_target(y_true, input_name="y_true") # Convert to Python primitive type to avoid NumPy type / Python str # comparison. See https://github.com/numpy/numpy/issues/6784 present_labels = np.unique(y_true).tolist() if y_type == "binary": if len(present_labels) == 2 and pos_label not in present_labels: raise ValueError( f"pos_label={pos_label} is not a valid label. It should be " f"one of {present_labels}" ) elif y_type == "multilabel-indicator" and pos_label != 1: raise ValueError( "Parameter pos_label is fixed to 1 for multilabel-indicator y_true. " "Do not set pos_label or set pos_label to 1." ) elif y_type == "multiclass": if pos_label != 1: raise ValueError( "Parameter pos_label is fixed to 1 for multiclass y_true. " "Do not set pos_label or set pos_label to 1." 
) y_true = label_binarize(y_true, classes=present_labels) average_precision = partial( _binary_uninterpolated_average_precision, pos_label=pos_label ) return _average_binary_score( average_precision, y_true, y_score, average, sample_weight=sample_weight ) @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "pos_label": [Real, str, "boolean", None], "sample_weight": ["array-like", None], }, prefer_skip_nested_validation=True, ) def det_curve(y_true, y_score, pos_label=None, sample_weight=None): """Compute error rates for different probability thresholds. .. note:: This metric is used for evaluation of ranking and error tradeoffs of a binary classification task. Read more in the :ref:`User Guide <det_curve>`. .. versionadded:: 0.24 Parameters ---------- y_true : ndarray of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. y_score : ndarray of shape of (n_samples,) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fpr : ndarray of shape (n_thresholds,) False positive rate (FPR) such that element i is the false positive rate of predictions with score >= thresholds[i]. This is occasionally referred to as false acceptance probability or fall-out. fnr : ndarray of shape (n_thresholds,) False negative rate (FNR) such that element i is the false negative rate of predictions with score >= thresholds[i]. This is occasionally referred to as false rejection or miss rate. thresholds : ndarray of shape (n_thresholds,) Decreasing score values. See Also -------- DetCurveDisplay.from_estimator : Plot DET curve given an estimator and some data. DetCurveDisplay.from_predictions : Plot DET curve given the true and predicted labels. DetCurveDisplay : DET curve visualization. roc_curve : Compute Receiver operating characteristic (ROC) curve. precision_recall_curve : Compute precision-recall curve. Examples -------- >>> import numpy as np >>> from sklearn.metrics import det_curve >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, fnr, thresholds = det_curve(y_true, y_scores) >>> fpr array([0.5, 0.5, 0. ]) >>> fnr array([0. , 0.5, 0.5]) >>> thresholds array([0.35, 0.4 , 0.8 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) if len(np.unique(y_true)) != 2: raise ValueError( "Only one class present in y_true. Detection error " "tradeoff curve is not defined in that case." 
) fns = tps[-1] - tps p_count = tps[-1] n_count = fps[-1] # start with false positives zero first_ind = ( fps.searchsorted(fps[0], side="right") - 1 if fps.searchsorted(fps[0], side="right") > 0 else None ) # stop with false negatives zero last_ind = tps.searchsorted(tps[-1]) + 1 sl = slice(first_ind, last_ind) # reverse the output such that list of false positives is decreasing return (fps[sl][::-1] / n_count, fns[sl][::-1] / p_count, thresholds[sl][::-1]) def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None): """Binary roc auc score.""" if len(np.unique(y_true)) != 2: raise ValueError( "Only one class present in y_true. ROC AUC score " "is not defined in that case." ) fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight) if max_fpr is None or max_fpr == 1: return auc(fpr, tpr) if max_fpr <= 0 or max_fpr > 1: raise ValueError("Expected max_fpr in range (0, 1], got: %r" % max_fpr) # Add a single point at max_fpr by linear interpolation stop = np.searchsorted(fpr, max_fpr, "right") x_interp = [fpr[stop - 1], fpr[stop]] y_interp = [tpr[stop - 1], tpr[stop]] tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp)) fpr = np.append(fpr[:stop], max_fpr) partial_auc = auc(fpr, tpr) # McClish correction: standardize result to be 0.5 if non-discriminant # and 1 if maximal min_area = 0.5 * max_fpr**2 max_area = max_fpr return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area)) @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "average": [StrOptions({"micro", "macro", "samples", "weighted"}), None], "sample_weight": ["array-like", None], "max_fpr": [Interval(Real, 0.0, 1, closed="right"), None], "multi_class": [StrOptions({"raise", "ovr", "ovo"})], "labels": ["array-like", None], }, prefer_skip_nested_validation=True, ) def roc_auc_score( y_true, y_score, *, average="macro", sample_weight=None, max_fpr=None, multi_class="raise", labels=None, ): """Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) \ from prediction scores. Note: this implementation can be used with binary, multiclass and multilabel classification, but some restrictions apply (see Parameters). Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_classes) True labels or binary label indicators. The binary and multiclass cases expect labels with shape (n_samples,) while the multilabel case expects binary label indicators with shape (n_samples, n_classes). y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores. * In the binary case, it corresponds to an array of shape `(n_samples,)`. Both probability estimates and non-thresholded decision values can be provided. The probability estimates correspond to the **probability of the class with the greater label**, i.e. `estimator.classes_[1]` and thus `estimator.predict_proba(X, y)[:, 1]`. The decision values corresponds to the output of `estimator.decision_function(X, y)`. See more information in the :ref:`User guide <roc_auc_binary>`; * In the multiclass case, it corresponds to an array of shape `(n_samples, n_classes)` of probability estimates provided by the `predict_proba` method. The probability estimates **must** sum to 1 across the possible classes. In addition, the order of the class scores must correspond to the order of ``labels``, if provided, or else to the numerical or lexicographical order of the labels in ``y_true``. 
See more information in the :ref:`User guide <roc_auc_multiclass>`; * In the multilabel case, it corresponds to an array of shape `(n_samples, n_classes)`. Probability estimates are provided by the `predict_proba` method and the non-thresholded decision values by the `decision_function` method. The probability estimates correspond to the **probability of the class with the greater label for each output** of the classifier. See more information in the :ref:`User guide <roc_auc_multilabel>`. average : {'micro', 'macro', 'samples', 'weighted'} or None, \ default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Note: multiclass ROC AUC currently only handles the 'macro' and 'weighted' averages. For multiclass targets, `average=None` is only implemented for `multi_class='ovr'` and `average='micro'` is only implemented for `multi_class='ovr'`. ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. sample_weight : array-like of shape (n_samples,), default=None Sample weights. max_fpr : float > 0 and <= 1, default=None If not ``None``, the standardized partial AUC [2]_ over the range [0, max_fpr] is returned. For the multiclass case, ``max_fpr``, should be either equal to ``None`` or ``1.0`` as AUC ROC partial computation currently is not supported for multiclass. multi_class : {'raise', 'ovr', 'ovo'}, default='raise' Only used for multiclass targets. Determines the type of configuration to use. The default value raises an error, so either ``'ovr'`` or ``'ovo'`` must be passed explicitly. ``'ovr'``: Stands for One-vs-rest. Computes the AUC of each class against the rest [3]_ [4]_. This treats the multiclass case in the same way as the multilabel case. Sensitive to class imbalance even when ``average == 'macro'``, because class imbalance affects the composition of each of the 'rest' groupings. ``'ovo'``: Stands for One-vs-one. Computes the average AUC of all possible pairwise combinations of classes [5]_. Insensitive to class imbalance when ``average == 'macro'``. labels : array-like of shape (n_classes,), default=None Only used for multiclass targets. List of labels that index the classes in ``y_score``. If ``None``, the numerical or lexicographical order of the labels in ``y_true`` is used. Returns ------- auc : float Area Under the Curve score. See Also -------- average_precision_score : Area under the precision-recall curve. roc_curve : Compute Receiver operating characteristic (ROC) curve. RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic (ROC) curve given an estimator and some data. RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic (ROC) curve given the true and predicted values. References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ .. [2] `Analyzing a portion of the ROC curve. McClish, 1989 <https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_ .. [3] Provost, F., Domingos, P. (2000). 
Well-trained PETs: Improving probability estimation trees (Section 6.2), CeDER Working Paper #IS-00-04, Stern School of Business, New York University. .. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern Recognition Letters, 27(8), 861-874. <https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_ .. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area Under the ROC Curve for Multiple Class Classification Problems. Machine Learning, 45(2), 171-186. <http://link.springer.com/article/10.1023/A:1010920819831>`_ Examples -------- Binary case: >>> from sklearn.datasets import load_breast_cancer >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.metrics import roc_auc_score >>> X, y = load_breast_cancer(return_X_y=True) >>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y) >>> roc_auc_score(y, clf.predict_proba(X)[:, 1]) 0.99... >>> roc_auc_score(y, clf.decision_function(X)) 0.99... Multiclass case: >>> from sklearn.datasets import load_iris >>> X, y = load_iris(return_X_y=True) >>> clf = LogisticRegression(solver="liblinear").fit(X, y) >>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr') 0.99... Multilabel case: >>> import numpy as np >>> from sklearn.datasets import make_multilabel_classification >>> from sklearn.multioutput import MultiOutputClassifier >>> X, y = make_multilabel_classification(random_state=0) >>> clf = MultiOutputClassifier(clf).fit(X, y) >>> # get a list of n_output containing probability arrays of shape >>> # (n_samples, n_classes) >>> y_pred = clf.predict_proba(X) >>> # extract the positive columns for each output >>> y_pred = np.transpose([pred[:, 1] for pred in y_pred]) >>> roc_auc_score(y, y_pred, average=None) array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...]) >>> from sklearn.linear_model import RidgeClassifierCV >>> clf = RidgeClassifierCV().fit(X, y) >>> roc_auc_score(y, clf.decision_function(X), average=None) array([0.81..., 0.84... , 0.93..., 0.87..., 0.94...]) """ y_type = type_of_target(y_true, input_name="y_true") y_true = check_array(y_true, ensure_2d=False, dtype=None) y_score = check_array(y_score, ensure_2d=False) if y_type == "multiclass" or ( y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2 ): # do not support partial ROC computation for multiclass if max_fpr is not None and max_fpr != 1.0: raise ValueError( "Partial AUC computation not available in " "multiclass setting, 'max_fpr' must be" " set to `None`, received `max_fpr={0}` " "instead".format(max_fpr) ) if multi_class == "raise": raise ValueError("multi_class must be in ('ovo', 'ovr')") return _multiclass_roc_auc_score( y_true, y_score, labels, multi_class, average, sample_weight ) elif y_type == "binary": labels = np.unique(y_true) y_true = label_binarize(y_true, classes=labels)[:, 0] return _average_binary_score( partial(_binary_roc_auc_score, max_fpr=max_fpr), y_true, y_score, average, sample_weight=sample_weight, ) else: # multilabel-indicator return _average_binary_score( partial(_binary_roc_auc_score, max_fpr=max_fpr), y_true, y_score, average, sample_weight=sample_weight, ) def _multiclass_roc_auc_score( y_true, y_score, labels, multi_class, average, sample_weight ): """Multiclass roc auc score. Parameters ---------- y_true : array-like of shape (n_samples,) True multiclass labels. 
y_score : array-like of shape (n_samples, n_classes) Target scores corresponding to probability estimates of a sample belonging to a particular class labels : array-like of shape (n_classes,) or None List of labels to index ``y_score`` used for multiclass. If ``None``, the lexical order of ``y_true`` is used to index ``y_score``. multi_class : {'ovr', 'ovo'} Determines the type of multiclass configuration to use. ``'ovr'``: Calculate metrics for the multiclass case using the one-vs-rest approach. ``'ovo'``: Calculate metrics for the multiclass case using the one-vs-one approach. average : {'micro', 'macro', 'weighted'} Determines the type of averaging performed on the pairwise binary metric scores ``'micro'``: Calculate metrics for the binarized-raveled classes. Only supported for `multi_class='ovr'`. .. versionadded:: 1.2 ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. Classes are assumed to be uniformly distributed. ``'weighted'``: Calculate metrics for each label, taking into account the prevalence of the classes. sample_weight : array-like of shape (n_samples,) or None Sample weights. """ # validation of the input y_score if not np.allclose(1, y_score.sum(axis=1)): raise ValueError( "Target scores need to be probabilities for multiclass " "roc_auc, i.e. they should sum up to 1.0 over classes" ) # validation for multiclass parameter specifications average_options = ("macro", "weighted", None) if multi_class == "ovr": average_options = ("micro",) + average_options if average not in average_options: raise ValueError( "average must be one of {0} for multiclass problems".format(average_options) ) multiclass_options = ("ovo", "ovr") if multi_class not in multiclass_options: raise ValueError( "multi_class='{0}' is not supported " "for multiclass ROC AUC, multi_class must be " "in {1}".format(multi_class, multiclass_options) ) if average is None and multi_class == "ovo": raise NotImplementedError( "average=None is not implemented for multi_class='ovo'." ) if labels is not None: labels = column_or_1d(labels) classes = _unique(labels) if len(classes) != len(labels): raise ValueError("Parameter 'labels' must be unique") if not np.array_equal(classes, labels): raise ValueError("Parameter 'labels' must be ordered") if len(classes) != y_score.shape[1]: raise ValueError( "Number of given labels, {0}, not equal to the number " "of columns in 'y_score', {1}".format(len(classes), y_score.shape[1]) ) if len(np.setdiff1d(y_true, classes)): raise ValueError("'y_true' contains labels not in parameter 'labels'") else: classes = _unique(y_true) if len(classes) != y_score.shape[1]: raise ValueError( "Number of classes in y_true not equal to the number of " "columns in 'y_score'" ) if multi_class == "ovo": if sample_weight is not None: raise ValueError( "sample_weight is not supported " "for multiclass one-vs-one ROC AUC, " "'sample_weight' must be None in this case." ) y_true_encoded = _encode(y_true, uniques=classes) # Hand & Till (2001) implementation (ovo) return _average_multiclass_ovo_score( _binary_roc_auc_score, y_true_encoded, y_score, average=average ) else: # ovr is same as multi-label y_true_multilabel = label_binarize(y_true, classes=classes) return _average_binary_score( _binary_roc_auc_score, y_true_multilabel, y_score, average, sample_weight=sample_weight, ) def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): """Calculate true and false positives per binary classification threshold. 
Parameters ---------- y_true : ndarray of shape (n_samples,) True targets of binary classification. y_score : ndarray of shape (n_samples,) Estimated probabilities or output of a decision function. pos_label : int, float, bool or str, default=None The label of the positive class. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fps : ndarray of shape (n_thresholds,) A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : ndarray of shape (n_thresholds,) An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : ndarray of shape (n_thresholds,) Decreasing score values. """ # Check to make sure y_true is valid y_type = type_of_target(y_true, input_name="y_true") if not (y_type == "binary" or (y_type == "multiclass" and pos_label is not None)): raise ValueError("{0} format is not supported".format(y_type)) check_consistent_length(y_true, y_score, sample_weight) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) assert_all_finite(y_true) assert_all_finite(y_score) # Filter out zero-weighted samples, as they should not impact the result if sample_weight is not None: sample_weight = column_or_1d(sample_weight)
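As a quick sanity check on the relationship spelled out in these docstrings (a hedged sketch using the public scikit-learn API on toy data): for a binary target, `roc_auc_score` returns the same value as applying `auc` to the output of `roc_curve`.

import numpy as np
from sklearn.metrics import auc, roc_auc_score, roc_curve

y_true = np.array([0, 0, 1, 1])
y_score = np.array([0.1, 0.4, 0.35, 0.8])

fpr, tpr, _ = roc_curve(y_true, y_score)
# trapezoidal area under the ROC curve == roc_auc_score for binary targets
assert np.isclose(auc(fpr, tpr), roc_auc_score(y_true, y_score))   # both 0.75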
next_line: sample_weight = _check_sample_weight(sample_weight, y_true)
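The target next_line calls the private `_check_sample_weight` helper; its documented behaviour (see the context snippet above) amounts to roughly the following, where `as_sample_weight` is a simplified, made-up stand-in rather than scikit-learn's actual function:

import numbers
import numpy as np

def as_sample_weight(sample_weight, n_samples, dtype=np.float64):
    # None -> array of ones; scalar -> broadcast; array -> validated 1D of length n_samples
    if sample_weight is None:
        return np.ones(n_samples, dtype=dtype)
    if isinstance(sample_weight, numbers.Number):
        return np.full(n_samples, sample_weight, dtype=dtype)
    sample_weight = np.asarray(sample_weight, dtype=dtype)
    if sample_weight.ndim != 1 or sample_weight.shape[0] != n_samples:
        raise ValueError("Sample weights must be a 1D array of length n_samples")
    return sample_weight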
gold_snippet_index: 16
created_at: 2023-10-07 13:19:48+00:00
level: 24k
repo_name: zbzhu99/madiff
file_path: diffuser/models/diffusion.py
[ { "identifier": "DPM_Solver", "path": "diffuser/utils/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(\n self,\n model_fn,\n noise_schedule,\n algorithm_type=\"dpmsolver++\",\n correcting_x0_fn=None,\n correcting_xt_fn=None,\n thresholding_max_val=1.0,\n dynamic_thresholding_ratio=0.995,\n ):\n \"\"\"Construct a DPM-Solver.\n\n We support both DPM-Solver (`algorithm_type=\"dpmsolver\"`) and DPM-Solver++ (`algorithm_type=\"dpmsolver++\"`).\n\n We also support the \"dynamic thresholding\" method in Imagen[1]. For pixel-space diffusion models, you\n can set both `algorithm_type=\"dpmsolver++\"` and `correcting_x0_fn=\"dynamic_thresholding\"` to use the\n dynamic thresholding. The \"dynamic thresholding\" can greatly improve the sample quality for pixel-space\n DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space\n DPMs (such as stable-diffusion).\n\n To support advanced algorithms in image-to-image applications, we also support corrector functions for\n both x0 and xt.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):\n ``\n def model_fn(x, t_continuous):\n return noise\n ``\n The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n algorithm_type: A `str`. Either \"dpmsolver\" or \"dpmsolver++\".\n correcting_x0_fn: A `str` or a function with the following format:\n ```\n def correcting_x0_fn(x0, t):\n x0_new = ...\n return x0_new\n ```\n This function is to correct the outputs of the data prediction model at each sampling step. e.g.,\n ```\n x0_pred = data_pred_model(xt, t)\n if correcting_x0_fn is not None:\n x0_pred = correcting_x0_fn(x0_pred, t)\n xt_1 = update(x0_pred, xt, t)\n ```\n If `correcting_x0_fn=\"dynamic_thresholding\"`, we use the dynamic thresholding proposed in Imagen[1].\n correcting_xt_fn: A function with the following format:\n ```\n def correcting_xt_fn(xt, t, step):\n x_new = ...\n return x_new\n ```\n This function is to correct the intermediate samples xt at each sampling step. e.g.,\n ```\n xt = ...\n xt = correcting_xt_fn(xt, t, step)\n ```\n thresholding_max_val: A `float`. The max value for thresholding.\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details).\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n\n [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour,\n Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models\n with deep language understanding. 
arXiv preprint arXiv:2205.11487, 2022b.\n \"\"\"\n self.model = lambda x, t: model_fn(x, t.expand((x.shape[0])))\n self.noise_schedule = noise_schedule\n assert algorithm_type in [\"dpmsolver\", \"dpmsolver++\"]\n self.algorithm_type = algorithm_type\n if correcting_x0_fn == \"dynamic_thresholding\":\n self.correcting_x0_fn = self.dynamic_thresholding_fn\n else:\n self.correcting_x0_fn = correcting_x0_fn\n self.correcting_xt_fn = correcting_xt_fn\n self.dynamic_thresholding_ratio = dynamic_thresholding_ratio\n self.thresholding_max_val = thresholding_max_val\n\n def dynamic_thresholding_fn(self, x0, t):\n \"\"\"\n The dynamic thresholding method.\n \"\"\"\n dims = x0.dim()\n p = self.dynamic_thresholding_ratio\n s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)\n s = expand_dims(\n torch.maximum(\n s, self.thresholding_max_val * torch.ones_like(s).to(s.device)\n ),\n dims,\n )\n x0 = torch.clamp(x0, -s, s) / s\n return x0\n\n def noise_prediction_fn(self, x, t):\n \"\"\"\n Return the noise prediction model.\n \"\"\"\n return self.model(x, t)\n\n def data_prediction_fn(self, x, t):\n \"\"\"\n Return the data prediction model (with corrector).\n \"\"\"\n noise = self.noise_prediction_fn(x, t)\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(\n t\n ), self.noise_schedule.marginal_std(t)\n x0 = (x - sigma_t * noise) / alpha_t\n if self.correcting_x0_fn is not None:\n x0 = self.correcting_x0_fn(x0, t)\n return x0\n\n def model_fn(self, x, t):\n \"\"\"\n Convert the model to the noise prediction model or the data prediction model.\n \"\"\"\n if self.algorithm_type == \"dpmsolver++\":\n return self.data_prediction_fn(x, t)\n else:\n return self.noise_prediction_fn(x, t)\n\n def get_time_steps(self, skip_type, t_T, t_0, N, device):\n \"\"\"Compute the intermediate time steps for sampling.\n\n Args:\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n N: A `int`. 
The total number of the spacing of the time steps.\n device: A torch device.\n Returns:\n A pytorch tensor of the time steps, with the shape (N + 1,).\n \"\"\"\n if skip_type == \"logSNR\":\n lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))\n lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))\n logSNR_steps = torch.linspace(\n lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1\n ).to(device)\n return self.noise_schedule.inverse_lambda(logSNR_steps)\n elif skip_type == \"time_uniform\":\n return torch.linspace(t_T, t_0, N + 1).to(device)\n elif skip_type == \"time_quadratic\":\n t_order = 2\n t = (\n torch.linspace(t_T ** (1.0 / t_order), t_0 ** (1.0 / t_order), N + 1)\n .pow(t_order)\n .to(device)\n )\n return t\n else:\n raise ValueError(\n \"Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'\".format(\n skip_type\n )\n )\n\n def get_orders_and_timesteps_for_singlestep_solver(\n self, steps, order, skip_type, t_T, t_0, device\n ):\n \"\"\"\n Get the order of each step for sampling by the singlestep DPM-Solver.\n\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. 
The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n \"\"\"\n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [\n 3,\n ] * (\n K - 2\n ) + [2, 1]\n elif steps % 3 == 1:\n orders = [\n 3,\n ] * (\n K - 1\n ) + [1]\n else:\n orders = [\n 3,\n ] * (\n K - 1\n ) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [\n 2,\n ] * K\n else:\n K = steps // 2 + 1\n orders = [\n 2,\n ] * (\n K - 1\n ) + [1]\n elif order == 1:\n K = 1\n orders = [\n 1,\n ] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == \"logSNR\":\n # To reproduce the results in DPM-Solver paper\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[\n torch.cumsum(\n torch.tensor(\n [\n 0,\n ]\n + orders\n ),\n 0,\n ).to(device)\n ]\n return timesteps_outer, orders\n\n def denoise_to_zero_fn(self, x, s):\n \"\"\"\n Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.\n \"\"\"\n return self.data_prediction_fn(x, s)\n\n def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):\n \"\"\"\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(\n s\n ), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = sigma_t / sigma_s * x - alpha_t * phi_1 * model_s\n if return_intermediate:\n return x_t, {\"model_s\": model_s}\n else:\n return x_t\n else:\n phi_1 = torch.expm1(h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = torch.exp(log_alpha_t - log_alpha_s) * x - (sigma_t * phi_1) * model_s\n if return_intermediate:\n return x_t, {\"model_s\": model_s}\n else:\n return x_t\n\n def singlestep_dpm_solver_second_update(\n self,\n x,\n s,\n t,\n r1=0.5,\n model_s=None,\n return_intermediate=False,\n solver_type=\"dpmsolver\",\n ):\n \"\"\"\n Singlestep solver DPM-Solver-2 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the second-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. 
If true, also return the model value at time `s` and `s1` (the intermediate time).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in [\"dpmsolver\", \"taylor\"]:\n raise ValueError(\n \"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(\n solver_type\n )\n )\n if r1 is None:\n r1 = 0.5\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n s1 = ns.inverse_lambda(lambda_s1)\n log_alpha_s, log_alpha_s1, log_alpha_t = (\n ns.marginal_log_mean_coeff(s),\n ns.marginal_log_mean_coeff(s1),\n ns.marginal_log_mean_coeff(t),\n )\n sigma_s, sigma_s1, sigma_t = (\n ns.marginal_std(s),\n ns.marginal_std(s1),\n ns.marginal_std(t),\n )\n alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_1 = torch.expm1(-h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (sigma_s1 / sigma_s) * x - (alpha_s1 * phi_11) * model_s\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == \"dpmsolver\":\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n - (0.5 / r1) * (alpha_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == \"taylor\":\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1.0 / r1) * (alpha_t * (phi_1 / h + 1.0)) * (model_s1 - model_s)\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_1 = torch.expm1(h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n torch.exp(log_alpha_s1 - log_alpha_s) * x\n - (sigma_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == \"dpmsolver\":\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (0.5 / r1) * (sigma_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == \"taylor\":\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (1.0 / r1) * (sigma_t * (phi_1 / h - 1.0)) * (model_s1 - model_s)\n )\n if return_intermediate:\n return x_t, {\"model_s\": model_s, \"model_s1\": model_s1}\n else:\n return x_t\n\n def singlestep_dpm_solver_third_update(\n self,\n x,\n s,\n t,\n r1=1.0 / 3.0,\n r2=2.0 / 3.0,\n model_s=None,\n model_s1=None,\n return_intermediate=False,\n solver_type=\"dpmsolver\",\n ):\n \"\"\"\n Singlestep solver DPM-Solver-3 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).\n If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. 
The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in [\"dpmsolver\", \"taylor\"]:\n raise ValueError(\n \"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(\n solver_type\n )\n )\n if r1 is None:\n r1 = 1.0 / 3.0\n if r2 is None:\n r2 = 2.0 / 3.0\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n lambda_s2 = lambda_s + r2 * h\n s1 = ns.inverse_lambda(lambda_s1)\n s2 = ns.inverse_lambda(lambda_s2)\n log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = (\n ns.marginal_log_mean_coeff(s),\n ns.marginal_log_mean_coeff(s1),\n ns.marginal_log_mean_coeff(s2),\n ns.marginal_log_mean_coeff(t),\n )\n sigma_s, sigma_s1, sigma_s2, sigma_t = (\n ns.marginal_std(s),\n ns.marginal_std(s1),\n ns.marginal_std(s2),\n ns.marginal_std(t),\n )\n alpha_s1, alpha_s2, alpha_t = (\n torch.exp(log_alpha_s1),\n torch.exp(log_alpha_s2),\n torch.exp(log_alpha_t),\n )\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_12 = torch.expm1(-r2 * h)\n phi_1 = torch.expm1(-h)\n phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.0\n phi_2 = phi_1 / h + 1.0\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (sigma_s1 / sigma_s) * x - (alpha_s1 * phi_11) * model_s\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (sigma_s2 / sigma_s) * x\n - (alpha_s2 * phi_12) * model_s\n + r2 / r1 * (alpha_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == \"dpmsolver\":\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1.0 / r2) * (alpha_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == \"taylor\":\n D1_0 = (1.0 / r1) * (model_s1 - model_s)\n D1_1 = (1.0 / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2.0 * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_12 = torch.expm1(r2 * h)\n phi_1 = torch.expm1(h)\n phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.0\n phi_2 = phi_1 / h - 1.0\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (torch.exp(log_alpha_s1 - log_alpha_s)) * x - (\n sigma_s1 * phi_11\n ) * model_s\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (torch.exp(log_alpha_s2 - log_alpha_s)) * x\n - (sigma_s2 * phi_12) * model_s\n - r2 / r1 * (sigma_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == \"dpmsolver\":\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (1.0 / r2) * (sigma_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == \"taylor\":\n D1_0 = (1.0 / r1) * (model_s1 - model_s)\n D1_1 = (1.0 / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2.0 * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n\n if return_intermediate:\n return x_t, {\"model_s\": model_s, \"model_s1\": model_s1, \"model_s2\": model_s2}\n else:\n return x_t\n\n def multistep_dpm_solver_second_update(\n self, x, 
model_prev_list, t_prev_list, t, solver_type=\"dpmsolver\"\n ):\n \"\"\"\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in [\"dpmsolver\", \"taylor\"]:\n raise ValueError(\n \"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(\n solver_type\n )\n )\n ns = self.noise_schedule\n model_prev_1, model_prev_0 = model_prev_list[-2], model_prev_list[-1]\n t_prev_1, t_prev_0 = t_prev_list[-2], t_prev_list[-1]\n lambda_prev_1, lambda_prev_0, lambda_t = (\n ns.marginal_lambda(t_prev_1),\n ns.marginal_lambda(t_prev_0),\n ns.marginal_lambda(t),\n )\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(\n t_prev_0\n ), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = (1.0 / r0) * (model_prev_0 - model_prev_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if solver_type == \"dpmsolver\":\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n - 0.5 * (alpha_t * phi_1) * D1_0\n )\n elif solver_type == \"taylor\":\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * (phi_1 / h + 1.0)) * D1_0\n )\n else:\n phi_1 = torch.expm1(h)\n if solver_type == \"dpmsolver\":\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - 0.5 * (sigma_t * phi_1) * D1_0\n )\n elif solver_type == \"taylor\":\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * (phi_1 / h - 1.0)) * D1_0\n )\n return x_t\n\n def multistep_dpm_solver_third_update(\n self, x, model_prev_list, t_prev_list, t, solver_type=\"dpmsolver\"\n ):\n \"\"\"\n Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. 
The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n model_prev_2, model_prev_1, model_prev_0 = model_prev_list\n t_prev_2, t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = (\n ns.marginal_lambda(t_prev_2),\n ns.marginal_lambda(t_prev_1),\n ns.marginal_lambda(t_prev_0),\n ns.marginal_lambda(t),\n )\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(\n t_prev_0\n ), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_1 = lambda_prev_1 - lambda_prev_2\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0, r1 = h_0 / h, h_1 / h\n D1_0 = (1.0 / r0) * (model_prev_0 - model_prev_1)\n D1_1 = (1.0 / r1) * (model_prev_1 - model_prev_2)\n D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)\n D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n phi_2 = phi_1 / h + 1.0\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_1 = torch.expm1(h)\n phi_2 = phi_1 / h - 1.0\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n return x_t\n\n def singlestep_dpm_solver_update(\n self,\n x,\n s,\n t,\n order,\n return_intermediate=False,\n solver_type=\"dpmsolver\",\n r1=None,\n r2=None,\n ):\n \"\"\"\n Singlestep DPM-Solver with the order `order` from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n r1: A `float`. The hyperparameter of the second-order or third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(\n x, s, t, return_intermediate=return_intermediate\n )\n elif order == 2:\n return self.singlestep_dpm_solver_second_update(\n x,\n s,\n t,\n return_intermediate=return_intermediate,\n solver_type=solver_type,\n r1=r1,\n )\n elif order == 3:\n return self.singlestep_dpm_solver_third_update(\n x,\n s,\n t,\n return_intermediate=return_intermediate,\n solver_type=solver_type,\n r1=r1,\n r2=r2,\n )\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def multistep_dpm_solver_update(\n self, x, model_prev_list, t_prev_list, t, order, solver_type=\"dpmsolver\"\n ):\n \"\"\"\n Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. 
We only support order == 1 or 2 or 3.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(\n x, t_prev_list[-1], t, model_s=model_prev_list[-1]\n )\n elif order == 2:\n return self.multistep_dpm_solver_second_update(\n x, model_prev_list, t_prev_list, t, solver_type=solver_type\n )\n elif order == 3:\n return self.multistep_dpm_solver_third_update(\n x, model_prev_list, t_prev_list, t, solver_type=solver_type\n )\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def dpm_solver_adaptive(\n self,\n x,\n order,\n t_T,\n t_0,\n h_init=0.05,\n atol=0.0078,\n rtol=0.05,\n theta=0.9,\n t_err=1e-5,\n solver_type=\"dpmsolver\",\n ):\n \"\"\"\n The adaptive step size solver based on singlestep DPM-Solver.\n\n Args:\n x: A pytorch tensor. The initial value at time `t_T`.\n order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n h_init: A `float`. The initial step size (for logSNR).\n atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].\n rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.\n theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].\n t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the\n current time and `t_0` is less than `t_err`. The default setting is 1e-5.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_0: A pytorch tensor. The approximated solution at time `t_0`.\n\n [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. 
Mitliagkas, \"Gotta go fast when generating data with score-based models,\" arXiv preprint arXiv:2105.14080, 2021.\n \"\"\"\n ns = self.noise_schedule\n s = t_T * torch.ones((1,)).to(x)\n lambda_s = ns.marginal_lambda(s)\n lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))\n h = h_init * torch.ones_like(s).to(x)\n x_prev = x\n nfe = 0\n if order == 2:\n r1 = 0.5\n lower_update = lambda x, s, t: self.dpm_solver_first_update(\n x, s, t, return_intermediate=True\n )\n higher_update = (\n lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(\n x, s, t, r1=r1, solver_type=solver_type, **kwargs\n )\n )\n elif order == 3:\n r1, r2 = 1.0 / 3.0, 2.0 / 3.0\n lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(\n x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type\n )\n higher_update = (\n lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(\n x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs\n )\n )\n else:\n raise ValueError(\n \"For adaptive step size solver, order must be 2 or 3, got {}\".format(\n order\n )\n )\n while torch.abs((s - t_0)).mean() > t_err:\n t = ns.inverse_lambda(lambda_s + h)\n x_lower, lower_noise_kwargs = lower_update(x, s, t)\n x_higher = higher_update(x, s, t, **lower_noise_kwargs)\n delta = torch.max(\n torch.ones_like(x).to(x) * atol,\n rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)),\n )\n norm_fn = lambda v: torch.sqrt(\n torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)\n )\n E = norm_fn((x_higher - x_lower) / delta).max()\n if torch.all(E <= 1.0):\n x = x_higher\n s = t\n x_prev = x_lower\n lambda_s = ns.marginal_lambda(s)\n h = torch.min(\n theta * h * torch.float_power(E, -1.0 / order).float(),\n lambda_0 - lambda_s,\n )\n nfe += order\n print(\"adaptive solver nfe\", nfe)\n return x\n\n def add_noise(self, x, t, noise=None):\n \"\"\"\n Compute the noised input xt = alpha_t * x + sigma_t * noise.\n\n Args:\n x: A `torch.Tensor` with shape `(batch_size, *shape)`.\n t: A `torch.Tensor` with shape `(t_size,)`.\n Returns:\n xt with shape `(t_size, batch_size, *shape)`.\n \"\"\"\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(\n t\n ), self.noise_schedule.marginal_std(t)\n if noise is None:\n noise = torch.randn((t.shape[0], *x.shape), device=x.device)\n x = x.reshape((-1, *x.shape))\n xt = expand_dims(alpha_t, x.dim()) * x + expand_dims(sigma_t, x.dim()) * noise\n if t.shape[0] == 1:\n return xt.squeeze(0)\n else:\n return xt\n\n def inverse(\n self,\n x,\n steps=20,\n t_start=None,\n t_end=None,\n order=2,\n skip_type=\"time_uniform\",\n method=\"multistep\",\n lower_order_final=True,\n denoise_to_zero=False,\n solver_type=\"dpmsolver\",\n atol=0.0078,\n rtol=0.05,\n return_intermediate=False,\n ):\n \"\"\"\n Inverse the sample `x` from time `t_start` to `t_end` by DPM-Solver.\n For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.\n \"\"\"\n t_0 = 1.0 / self.noise_schedule.total_N if t_start is None else t_start\n t_T = self.noise_schedule.T if t_end is None else t_end\n assert (\n t_0 > 0 and t_T > 0\n ), \"Time range needs to be greater than 0. 
For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n return self.sample(\n x,\n steps=steps,\n t_start=t_0,\n t_end=t_T,\n order=order,\n skip_type=skip_type,\n method=method,\n lower_order_final=lower_order_final,\n denoise_to_zero=denoise_to_zero,\n solver_type=solver_type,\n atol=atol,\n rtol=rtol,\n return_intermediate=return_intermediate,\n )\n\n def sample(\n self,\n x,\n condition_func,\n steps=20,\n t_start=None,\n t_end=None,\n order=2,\n skip_type=\"time_uniform\",\n method=\"multistep\",\n lower_order_final=True,\n denoise_to_zero=False,\n solver_type=\"dpmsolver\",\n atol=0.0078,\n rtol=0.05,\n return_intermediate=False,\n ):\n \"\"\"\n Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.\n\n =====================================================\n\n We support the following algorithms for both noise prediction model and data prediction model:\n - 'singlestep':\n Singlestep DPM-Solver (i.e. \"DPM-Solver-fast\" in the paper), which combines different orders of singlestep DPM-Solver.\n We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).\n The total number of function evaluations (NFE) == `steps`.\n Given a fixed NFE == `steps`, the sampling procedure is:\n - If `order` == 1:\n - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.\n - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If `order` == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.\n - 'multistep':\n Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.\n We initialize the first `order` values by lower order multistep solvers.\n Given a fixed NFE == `steps`, the sampling procedure is:\n Denote K = steps.\n - If `order` == 1:\n - We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.\n - If `order` == 3:\n - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.\n - 'singlestep_fixed':\n Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).\n We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.\n - 'adaptive':\n Adaptive step size DPM-Solver (i.e. 
\"DPM-Solver-12\" and \"DPM-Solver-23\" in the paper).\n We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.\n You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs\n (NFE) and the sample quality.\n - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.\n - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.\n\n =====================================================\n\n Some advices for choosing the algorithm:\n - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:\n Use singlestep DPM-Solver or DPM-Solver++ (\"DPM-Solver-fast\" in the paper) with `order = 3`.\n e.g., DPM-Solver:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n e.g., DPM-Solver++:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n - For **guided sampling with large guidance scale** by DPMs:\n Use multistep DPM-Solver with `algorithm_type=\"dpmsolver++\"` and `order = 2`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,\n skip_type='time_uniform', method='multistep')\n\n We support three types of `skip_type`:\n - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**\n - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.\n - 'time_quadratic': quadratic time for the time steps.\n\n =====================================================\n Args:\n x: A pytorch tensor. The initial value at time `t_start`\n e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.\n steps: A `int`. The total number of function evaluations (NFE).\n t_start: A `float`. The starting time of the sampling.\n If `T` is None, we use self.noise_schedule.T (default is 1.0).\n t_end: A `float`. The ending time of the sampling.\n If `t_end` is None, we use 1. / self.noise_schedule.total_N.\n e.g. if total_N == 1000, we have `t_end` == 1e-3.\n For discrete-time DPMs:\n - We recommend `t_end` == 1. / self.noise_schedule.total_N.\n For continuous-time DPMs:\n - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.\n order: A `int`. The order of DPM-Solver.\n skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.\n method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.\n denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.\n Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).\n\n This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and\n score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID\n for diffusion models sampling by diffusion SDEs for low-resolutional images\n (such as CIFAR-10). However, we observed that such trick does not matter for\n high-resolutional images. 
As it needs an additional NFE, we do not recommend\n it for high-resolutional images.\n lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.\n Only valid for `method=multistep` and `steps < 15`. We empirically find that\n this trick is a key to stabilizing the sampling by DPM-Solver with very few steps\n (especially for steps <= 10). So we recommend to set it to be `True`.\n solver_type: A `str`. The taylor expansion type for the solver. `dpmsolver` or `taylor`. We recommend `dpmsolver`.\n atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n return_intermediate: A `bool`. Whether to save the xt at each step.\n When set to `True`, method returns a tuple (x0, intermediates); when set to False, method returns only x0.\n Returns:\n x_end: A pytorch tensor. The approximated solution at time `t_end`.\n\n \"\"\"\n t_0 = 1.0 / self.noise_schedule.total_N if t_end is None else t_end\n t_T = self.noise_schedule.T if t_start is None else t_start\n assert (\n t_0 > 0 and t_T > 0\n ), \"Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n if return_intermediate:\n assert method in [\n \"multistep\",\n \"singlestep\",\n \"singlestep_fixed\",\n ], \"Cannot use adaptive solver when saving intermediate values\"\n if self.correcting_xt_fn is not None:\n assert method in [\n \"multistep\",\n \"singlestep\",\n \"singlestep_fixed\",\n ], \"Cannot use adaptive solver when correcting_xt_fn is not None\"\n device = x.device\n intermediates = []\n with torch.no_grad():\n if method == \"adaptive\":\n x = self.dpm_solver_adaptive(\n x,\n order=order,\n t_T=t_T,\n t_0=t_0,\n atol=atol,\n rtol=rtol,\n solver_type=solver_type,\n )\n elif method == \"multistep\":\n assert steps >= order\n timesteps = self.get_time_steps(\n skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device\n )\n assert timesteps.shape[0] - 1 == steps\n # Init the initial values.\n step = 0\n t = timesteps[step]\n t_prev_list = [t]\n model_prev_list = [self.model_fn(x, t)]\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n # Init the first `order` values by lower order multistep DPM-Solver.\n for step in range(1, order):\n t = timesteps[step]\n x = condition_func(x)\n x = self.multistep_dpm_solver_update(\n x,\n model_prev_list,\n t_prev_list,\n t,\n step,\n solver_type=solver_type,\n )\n if self.correcting_xt_fn is not None:\n x = condition_func(x)\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n t_prev_list.append(t)\n model_prev_list.append(self.model_fn(x, t))\n # Compute the remaining values by `order`-th order multistep DPM-Solver.\n for step in range(order, steps + 1):\n t = timesteps[step]\n # We only use lower order for steps < 10\n if lower_order_final and steps < 10:\n step_order = min(order, steps + 1 - step)\n else:\n step_order = order\n x = condition_func(x)\n x = self.multistep_dpm_solver_update(\n x,\n model_prev_list,\n t_prev_list,\n t,\n step_order,\n solver_type=solver_type,\n )\n if self.correcting_xt_fn is not None:\n x = condition_func(x)\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n for i in range(order - 1):\n t_prev_list[i] = t_prev_list[i + 1]\n model_prev_list[i] = 
model_prev_list[i + 1]\n t_prev_list[-1] = t\n # We do not need to evaluate the final model value.\n if step < steps:\n model_prev_list[-1] = self.model_fn(x, t)\n elif method in [\"singlestep\", \"singlestep_fixed\"]:\n if method == \"singlestep\":\n (\n timesteps_outer,\n orders,\n ) = self.get_orders_and_timesteps_for_singlestep_solver(\n steps=steps,\n order=order,\n skip_type=skip_type,\n t_T=t_T,\n t_0=t_0,\n device=device,\n )\n elif method == \"singlestep_fixed\":\n K = steps // order\n orders = [\n order,\n ] * K\n timesteps_outer = self.get_time_steps(\n skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device\n )\n for step, order in enumerate(orders):\n s, t = timesteps_outer[step], timesteps_outer[step + 1]\n timesteps_inner = self.get_time_steps(\n skip_type=skip_type,\n t_T=s.item(),\n t_0=t.item(),\n N=order,\n device=device,\n )\n lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)\n h = lambda_inner[-1] - lambda_inner[0]\n r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h\n r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h\n x = condition_func(x)\n x = self.singlestep_dpm_solver_update(\n x, s, t, order, solver_type=solver_type, r1=r1, r2=r2\n )\n if self.correcting_xt_fn is not None:\n x = condition_func(x)\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n else:\n raise ValueError(\"Got wrong method {}\".format(method))\n if denoise_to_zero:\n t = torch.ones((1,)).to(device) * t_0\n x = self.denoise_to_zero_fn(x, t)\n x = condition_func(x)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step + 1)\n x = condition_func(x)\n if return_intermediate:\n intermediates.append(x)\n if return_intermediate:\n return x, intermediates\n else:\n return x" }, { "identifier": "NoiseScheduleVP", "path": "diffuser/utils/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule=\"discrete\",\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n continuous_beta_1=20.0,\n dtype=torch.float32,\n ):\n \"\"\"Create a wrapper class for the forward SDE (VP type).\n\n ***\n Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.\n We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.\n ***\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).\n We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).\n Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:\n\n log_alpha_t = self.marginal_log_mean_coeff(t)\n sigma_t = self.marginal_std(t)\n lambda_t = self.marginal_lambda(t)\n\n Moreover, as lambda(t) is an invertible function, we also support its inverse function:\n\n t = self.inverse_lambda(lambda_t)\n\n ===============================================================\n\n We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).\n\n 1. For discrete-time DPMs:\n\n For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:\n t_i = (i + 1) / N\n e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.\n We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.\n\n Args:\n betas: A `torch.Tensor`. 
The beta array for the discrete-time DPM. (See the original DDPM paper for details)\n alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)\n\n Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.\n\n **Important**: Please pay special attention for the args for `alphas_cumprod`:\n The `alphas_cumprod` is the \\hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that\n q_{t_n | 0}(x_{t_n} | x_0) = N ( \\sqrt{\\hat{alpha_n}} * x_0, (1 - \\hat{alpha_n}) * I ).\n Therefore, the notation \\hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have\n alpha_{t_n} = \\sqrt{\\hat{alpha_n}},\n and\n log(alpha_{t_n}) = 0.5 * log(\\hat{alpha_n}).\n\n\n 2. For continuous-time DPMs:\n\n We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise\n schedule are the default settings in DDPM and improved-DDPM:\n\n Args:\n beta_min: A `float` number. The smallest beta for the linear schedule.\n beta_max: A `float` number. The largest beta for the linear schedule.\n cosine_s: A `float` number. The hyperparameter in the cosine schedule.\n cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.\n T: A `float` number. The ending time of the forward process.\n\n ===============================================================\n\n Args:\n schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,\n 'linear' or 'cosine' for continuous-time DPMs.\n Returns:\n A wrapper object of the forward SDE (VP type).\n\n ===============================================================\n\n Example:\n\n # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', betas=betas)\n\n # For discrete-time DPMs, given alphas_cumprod (the \\hat{alpha_n} array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)\n\n # For continuous-time DPMs (VPSDE), linear schedule:\n >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)\n\n \"\"\"\n\n if schedule not in [\"discrete\", \"linear\", \"cosine\"]:\n raise ValueError(\n \"Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'\".format(\n schedule\n )\n )\n\n self.schedule = schedule\n if schedule == \"discrete\":\n if betas is not None:\n log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)\n else:\n assert alphas_cumprod is not None\n log_alphas = 0.5 * torch.log(alphas_cumprod)\n self.total_N = len(log_alphas)\n self.T = 1.0\n self.t_array = (\n torch.linspace(0.0, 1.0, self.total_N + 1)[1:]\n .reshape((1, -1))\n .to(dtype=dtype)\n )\n self.log_alpha_array = log_alphas.reshape(\n (\n 1,\n -1,\n )\n ).to(dtype=dtype)\n else:\n self.total_N = 1000\n self.beta_0 = continuous_beta_0\n self.beta_1 = continuous_beta_1\n self.cosine_s = 0.008\n self.cosine_beta_max = 999.0\n self.cosine_t_max = (\n math.atan(self.cosine_beta_max * (1.0 + self.cosine_s) / math.pi)\n * 2.0\n * (1.0 + self.cosine_s)\n / math.pi\n - self.cosine_s\n )\n self.cosine_log_alpha_0 = math.log(\n math.cos(self.cosine_s / (1.0 + self.cosine_s) * math.pi / 2.0)\n )\n self.schedule = schedule\n if schedule == \"cosine\":\n # For the cosine schedule, T = 1 will have numerical issues. 
So we manually set the ending time T.\n # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.\n self.T = 0.9946\n else:\n self.T = 1.0\n\n def marginal_log_mean_coeff(self, t):\n \"\"\"\n Compute log(alpha_t) of a given continuous-time label t in [0, T].\n \"\"\"\n if self.schedule == \"discrete\":\n return interpolate_fn(\n t.reshape((-1, 1)),\n self.t_array.to(t.device),\n self.log_alpha_array.to(t.device),\n ).reshape((-1))\n elif self.schedule == \"linear\":\n return -0.25 * t**2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0\n elif self.schedule == \"cosine\":\n log_alpha_fn = lambda s: torch.log(\n torch.cos((s + self.cosine_s) / (1.0 + self.cosine_s) * math.pi / 2.0)\n )\n log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0\n return log_alpha_t\n\n def marginal_alpha(self, t):\n \"\"\"\n Compute alpha_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.exp(self.marginal_log_mean_coeff(t))\n\n def marginal_std(self, t):\n \"\"\"\n Compute sigma_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.sqrt(1.0 - torch.exp(2.0 * self.marginal_log_mean_coeff(t)))\n\n def marginal_lambda(self, t):\n \"\"\"\n Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].\n \"\"\"\n log_mean_coeff = self.marginal_log_mean_coeff(t)\n log_std = 0.5 * torch.log(1.0 - torch.exp(2.0 * log_mean_coeff))\n return log_mean_coeff - log_std\n\n def inverse_lambda(self, lamb):\n \"\"\"\n Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.\n \"\"\"\n if self.schedule == \"linear\":\n tmp = (\n 2.0\n * (self.beta_1 - self.beta_0)\n * torch.logaddexp(-2.0 * lamb, torch.zeros((1,)).to(lamb))\n )\n Delta = self.beta_0**2 + tmp\n return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)\n elif self.schedule == \"discrete\":\n log_alpha = -0.5 * torch.logaddexp(\n torch.zeros((1,)).to(lamb.device), -2.0 * lamb\n )\n t = interpolate_fn(\n log_alpha.reshape((-1, 1)),\n torch.flip(self.log_alpha_array.to(lamb.device), [1]),\n torch.flip(self.t_array.to(lamb.device), [1]),\n )\n return t.reshape((-1,))\n else:\n log_alpha = -0.5 * torch.logaddexp(-2.0 * lamb, torch.zeros((1,)).to(lamb))\n t_fn = (\n lambda log_alpha_t: torch.arccos(\n torch.exp(log_alpha_t + self.cosine_log_alpha_0)\n )\n * 2.0\n * (1.0 + self.cosine_s)\n / math.pi\n - self.cosine_s\n )\n t = t_fn(log_alpha)\n return t" }, { "identifier": "model_wrapper", "path": "diffuser/utils/dpm_solver.py", "snippet": "def model_wrapper(\n model,\n noise_schedule,\n model_type=\"noise\",\n model_kwargs={},\n guidance_type=\"uncond\",\n condition=None,\n unconditional_condition=None,\n guidance_scale=1.0,\n classifier_fn=None,\n classifier_kwargs={},\n):\n \"\"\"Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to\n firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.\n\n We support four types of the diffusion model by setting `model_type`:\n\n 1. \"noise\": noise prediction model. (Trained by predicting noise).\n\n 2. \"x_start\": data prediction model. (Trained by predicting the data x_0 at time 0).\n\n 3. \"v\": velocity prediction model. (Trained by predicting the velocity).\n The \"v\" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].\n\n [1] Salimans, Tim, and Jonathan Ho. 
\"Progressive distillation for fast sampling of diffusion models.\"\n arXiv preprint arXiv:2202.00512 (2022).\n [2] Ho, Jonathan, et al. \"Imagen Video: High Definition Video Generation with Diffusion Models.\"\n arXiv preprint arXiv:2210.02303 (2022).\n\n 4. \"score\": marginal score function. (Trained by denoising score matching).\n Note that the score function and the noise prediction model follows a simple relationship:\n ```\n noise(x_t, t) = -sigma_t * score(x_t, t)\n ```\n\n We support three types of guided sampling by DPMs by setting `guidance_type`:\n 1. \"uncond\": unconditional sampling by DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n 2. \"classifier\": classifier guidance sampling [3] by DPMs and another classifier.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n The input `classifier_fn` has the following format:\n ``\n classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)\n ``\n\n [3] P. Dhariwal and A. Q. Nichol, \"Diffusion models beat GANs on image synthesis,\"\n in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.\n\n 3. \"classifier-free\": classifier-free guidance sampling by conditional DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score\n ``\n And if cond == `unconditional_condition`, the model output is the unconditional DPM output.\n\n [4] Ho, Jonathan, and Tim Salimans. \"Classifier-free diffusion guidance.\"\n arXiv preprint arXiv:2207.12598 (2022).\n\n\n The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)\n or continuous-time labels (i.e. epsilon to T).\n\n We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:\n ``\n def model_fn(x, t_continuous) -> noise:\n t_input = get_model_input_time(t_continuous)\n return noise_pred(model, x, t_input, **model_kwargs)\n ``\n where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.\n\n ===============================================================\n\n Args:\n model: A diffusion model with the corresponding format described above.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n model_type: A `str`. The parameterization type of the diffusion model.\n \"noise\" or \"x_start\" or \"v\" or \"score\".\n model_kwargs: A `dict`. A dict for the other inputs of the model function.\n guidance_type: A `str`. The type of the guidance for sampling.\n \"uncond\" or \"classifier\" or \"classifier-free\".\n condition: A pytorch tensor. The condition for the guided sampling.\n Only used for \"classifier\" or \"classifier-free\" guidance type.\n unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.\n Only used for \"classifier-free\" guidance type.\n guidance_scale: A `float`. The scale for the guided sampling.\n classifier_fn: A classifier function. Only used for the classifier guidance.\n classifier_kwargs: A `dict`. 
A dict for the other inputs of the classifier function.\n Returns:\n A noise prediction model that accepts the noised data and the continuous time as the inputs.\n \"\"\"\n\n def get_model_input_time(t_continuous):\n \"\"\"\n Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.\n For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].\n For continuous-time DPMs, we just use `t_continuous`.\n \"\"\"\n if noise_schedule.schedule == \"discrete\":\n return (t_continuous - 1.0 / noise_schedule.total_N) * 1000.0\n else:\n return t_continuous\n\n def noise_pred_fn(x, t_continuous, cond=None):\n t_input = get_model_input_time(t_continuous)\n if cond is None:\n output = model(x, t_input, **model_kwargs)\n else:\n output = model(x, t_input, cond, **model_kwargs)\n if model_type == \"noise\":\n return output\n elif model_type == \"x_start\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(\n t_continuous\n ), noise_schedule.marginal_std(t_continuous)\n return (x - alpha_t * output) / sigma_t\n elif model_type == \"v\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(\n t_continuous\n ), noise_schedule.marginal_std(t_continuous)\n return alpha_t * output + sigma_t * x\n elif model_type == \"score\":\n sigma_t = noise_schedule.marginal_std(t_continuous)\n return -sigma_t * output\n\n def cond_grad_fn(x, t_input):\n \"\"\"\n Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).\n \"\"\"\n with torch.enable_grad():\n x_in = x.detach().requires_grad_(True)\n log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)\n return torch.autograd.grad(log_prob.sum(), x_in)[0]\n\n def model_fn(x, t_continuous):\n \"\"\"\n The noise predicition model function that is used for DPM-Solver.\n \"\"\"\n if guidance_type == \"uncond\":\n return noise_pred_fn(x, t_continuous)\n elif guidance_type == \"classifier\":\n assert classifier_fn is not None\n t_input = get_model_input_time(t_continuous)\n cond_grad = cond_grad_fn(x, t_input)\n sigma_t = noise_schedule.marginal_std(t_continuous)\n noise = noise_pred_fn(x, t_continuous)\n return noise - guidance_scale * sigma_t * cond_grad\n elif guidance_type == \"classifier-free\":\n if guidance_scale == 1.0 or unconditional_condition is None:\n return noise_pred_fn(x, t_continuous, cond=condition)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t_continuous] * 2)\n c_in = torch.cat([unconditional_condition, condition])\n noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)\n return noise_uncond + guidance_scale * (noise - noise_uncond)\n\n assert model_type in [\"noise\", \"x_start\", \"v\", \"score\"]\n assert guidance_type in [\"uncond\", \"classifier\", \"classifier-free\"]\n return model_fn" }, { "identifier": "Losses", "path": "diffuser/models/helpers.py", "snippet": "class SinusoidalPosEmb(nn.Module):\nclass Downsample1d(nn.Module):\nclass Upsample1d(nn.Module):\nclass Conv1dBlock(nn.Module):\nclass SelfAttention(nn.Module):\nclass PositionalEncoding(nn.Module):\nclass MlpSelfAttention(nn.Module):\nclass WeightedLoss(nn.Module):\nclass WeightedStateLoss(nn.Module):\nclass ValueLoss(nn.Module):\nclass WeightedL1(WeightedLoss):\nclass WeightedL2(WeightedLoss):\nclass WeightedStateL2(WeightedStateLoss):\nclass ValueL1(ValueLoss):\nclass ValueL2(ValueLoss):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, 
inp_channels, out_channels, kernel_size, mish=True, n_groups=8):\n def forward(self, x):\n def __init__(\n self,\n n_channels: int,\n qk_n_channels: int,\n v_n_channels: int,\n nheads: int = 4,\n residual: bool = False,\n use_state: bool = False,\n ):\n def forward(self, x, states: torch.Tensor = None):\n def __init__(self, num_hiddens, dropout: float = 0, max_len: int = 1000):\n def forward(self, X):\n def __init__(self, dim_in, dim_hidden=128):\n def forward(self, x):\ndef extract(a, t, x_shape):\ndef cosine_beta_schedule(timesteps, s=0.008, dtype=torch.float32):\ndef apply_conditioning(x, conditions, action_dim):\n def __init__(self, weights, action_dim):\n def forward(self, pred, targ):\n def __init__(self, weights):\n def forward(self, pred, targ):\n def __init__(self, *args):\n def forward(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n X = torch.arange(max_len, dtype=torch.float32).reshape(-1, 1) / torch.pow(\n 10000, torch.arange(0, num_hiddens, 2, dtype=torch.float32) / num_hiddens\n )\n X = X + self.P[:, : X.shape[1], :].to(X.device)" } ]
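The context snippets above contain the complete sampling stack (NoiseScheduleVP, model_wrapper, DPM_Solver) that this record's target file imports. Below is a minimal sketch of how these pieces fit together; the linear beta schedule, the dummy zero-output noise predictor, and the tensor shapes are placeholders of my own, not values from the repository, and the call follows the sample(x, condition_func, ...) signature shown in the DPM_Solver snippet.

import torch
from diffuser.utils.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper

betas = torch.linspace(1e-4, 2e-2, 1000)          # assumed discrete-time beta schedule
ns = NoiseScheduleVP("discrete", betas=betas)

dummy_model = lambda x, t: torch.zeros_like(x)    # stand-in for the trained noise predictor
model_fn = model_wrapper(dummy_model, ns, model_type="noise", guidance_type="uncond")

solver = DPM_Solver(model_fn, ns, algorithm_type="dpmsolver++")
x_T = torch.randn(4, 16, 23)                      # placeholder shape; real trajectories may differ
x_0 = solver.sample(
    x_T,
    condition_func=lambda x: x,                   # identity conditioning; this repo's sample() requires it
    steps=20,
    order=2,
    skip_type="time_uniform",
    method="multistep",
)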
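The NoiseScheduleVP snippet defines alpha_t, sigma_t and the half-logSNR lambda_t = log(alpha_t) - log(sigma_t), together with its inverse. A small round-trip check of those quantities, again with an assumed linear beta array:

import torch
from diffuser.utils.dpm_solver import NoiseScheduleVP

ns = NoiseScheduleVP("discrete", betas=torch.linspace(1e-4, 2e-2, 1000))
t = torch.tensor([0.1, 0.5, 1.0])
alpha_t, sigma_t = ns.marginal_alpha(t), ns.marginal_std(t)
lam = ns.marginal_lambda(t)      # equals log(alpha_t) - log(sigma_t) up to floating-point error
t_back = ns.inverse_lambda(lam)  # recovers values close to t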
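As a worked example of the order schedule described in get_orders_and_timesteps_for_singlestep_solver: with order=3 and steps=10, K = 10 // 3 + 1 = 4 and steps % 3 == 1, so the solver runs (K - 1) = 3 steps of DPM-Solver-3 followed by one step of DPM-Solver-1, i.e. orders == [3, 3, 3, 1], which spends exactly 3 + 3 + 3 + 1 = 10 function evaluations.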
import functools
import numpy as np
import torch
import torch.nn.functional as F
import diffuser.utils as utils
from torch import nn
from diffuser.utils.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper
from .helpers import Losses, apply_conditioning, cosine_beta_schedule, extract
20,349
class GaussianDiffusion(nn.Module):
    def __init__(
        self,
        model,
        n_agents,
        horizon,
        history_horizon,
        observation_dim,
        action_dim,
        n_timesteps=1000,
        loss_type="l1",
        clip_denoised=False,
        predict_epsilon=True,
        action_weight=1.0,
        loss_discount=1.0,
        loss_weights=None,
        returns_condition=False,
        condition_guidance_w=0.1,
        agent_share_noise=False,
        data_encoder=utils.IdentityEncoder(),
        **kwargs,
    ):
        super().__init__()
        self.n_agents = n_agents
        self.horizon = horizon
        self.history_horizon = history_horizon
        self.observation_dim = observation_dim
        self.action_dim = action_dim
        self.transition_dim = observation_dim + action_dim
        self.model = model
        self.returns_condition = returns_condition
        self.condition_guidance_w = condition_guidance_w
        self.agent_share_noise = agent_share_noise
        self.data_encoder = data_encoder
class GaussianDiffusion(nn.Module):
    def __init__(
        self,
        model,
        n_agents,
        horizon,
        history_horizon,
        observation_dim,
        action_dim,
        n_timesteps=1000,
        loss_type="l1",
        clip_denoised=False,
        predict_epsilon=True,
        action_weight=1.0,
        loss_discount=1.0,
        loss_weights=None,
        returns_condition=False,
        condition_guidance_w=0.1,
        agent_share_noise=False,
        data_encoder=utils.IdentityEncoder(),
        **kwargs,
    ):
        super().__init__()
        self.n_agents = n_agents
        self.horizon = horizon
        self.history_horizon = history_horizon
        self.observation_dim = observation_dim
        self.action_dim = action_dim
        self.transition_dim = observation_dim + action_dim
        self.model = model
        self.returns_condition = returns_condition
        self.condition_guidance_w = condition_guidance_w
        self.agent_share_noise = agent_share_noise
        self.data_encoder = data_encoder
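The constructor stores returns_condition and condition_guidance_w, which in classifier-free guidance weight conditional against unconditional noise predictions, the same combination the model_wrapper snippet applies in its "classifier-free" branch. A minimal illustration of that weighting with hypothetical epsilon tensors, not this class's actual sampling code:

import torch

w = 0.1                                  # condition_guidance_w default from the constructor
eps_uncond = torch.randn(4, 16, 23)      # hypothetical unconditional prediction
eps_cond = torch.randn(4, 16, 23)        # hypothetical returns-conditioned prediction
eps_guided = eps_uncond + w * (eps_cond - eps_uncond)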
betas = cosine_beta_schedule(n_timesteps)
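The gold next line calls the imported cosine_beta_schedule(timesteps, s=0.008, dtype=torch.float32) helper, whose body is not shown in the snippet above. Below is a sketch of the standard cosine schedule (Nichol and Dhariwal, 2021) that helpers with this signature commonly implement; the 0.999 clipping bound is an assumption.

import numpy as np
import torch

def cosine_beta_schedule(timesteps, s=0.008, dtype=torch.float32):
    # squared-cosine cumulative alphas, normalized so alphas_cumprod[0] == 1
    steps = timesteps + 1
    x = np.linspace(0, steps, steps)
    alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.tensor(np.clip(betas, 0, 0.999), dtype=dtype)

betas = cosine_beta_schedule(1000)       # mirrors the record's next line with the constructor's default n_timesteps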
3
2023-10-13 13:03:53+00:00
24k
hellloxiaotian/KDNet
train_KDNet.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n for w in weights if isinstance(weights, list) else [weights]:\n # attempt_download(w) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n ckpt = torch.load(w, map_location=map_location) # load\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n \n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is nn.Upsample:\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n \n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble" }, { "identifier": "attempt_loadv5", "path": "models/experimental.py", "snippet": "def attempt_loadv5(weights, device=None, inplace=True, fuse=True):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n from models.yolo import Detect, Model\n\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n ckpt = torch.load(attempt_download(w), map_location='cpu') # load\n ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model\n\n # Model compatibility updates\n if not hasattr(ckpt, 'stride'):\n ckpt.stride = torch.tensor([32.])\n if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):\n ckpt.names = dict(enumerate(ckpt.names)) # convert to dict\n\n model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode\n\n # Module compatibility updates\n for m in model.modules():\n t = type(m)\n if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):\n m.inplace = inplace # torch 1.7.0 compatibility\n if t is Detect and not isinstance(m.anchor_grid, list):\n delattr(m, 'anchor_grid')\n setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)\n elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n\n # Return model\n if len(model) == 1:\n return model[-1]\n\n # Return detection ensemble\n print(f'Ensemble created with {weights}\\n')\n for k in 'names', 'nc', 'yaml':\n setattr(model, k, getattr(model[0], k))\n model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride\n assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'\n return model" }, { "identifier": "attempt_load_zxy", "path": "models/experimental.py", "snippet": "def attempt_load_zxy(weights, device, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n attempt_download(w)\n ckpt = torch.load(w, map_location=map_location) # load\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].to(device).float().fuse().eval()) # FP32 model\n\n # Compatibility updates\n for m in 
model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is nn.Upsample:\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n\n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble" }, { "identifier": "Model", "path": "models/yolo.py", "snippet": "class Model(nn.Module):\n # def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes\n def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes\n super(Model, self).__init__()\n self.traced = False\n if isinstance(cfg, dict):\n self.yaml = cfg # model dict\n else: # is *.yaml\n import yaml # for torch hub\n self.yaml_file = Path(cfg).name\n with open(cfg) as f:\n self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict\n\n # Define model\n ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels\n if nc and nc != self.yaml['nc']:\n logger.info(f\"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}\")\n self.yaml['nc'] = nc # override yaml value\n if anchors:\n logger.info(f'Overriding model.yaml anchors with anchors={anchors}')\n self.yaml['anchors'] = round(anchors) # override yaml value\n self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist\n # self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]).cuda() # model, savelist\n self.names = [str(i) for i in range(self.yaml['nc'])] # default names\n # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])\n\n # Build strides, anchors\n # m = self.model[-1] # Detect()\n m = self.model[-1] # Detect()\n if isinstance(m, Detect):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IDetect):\n print('yolo.py-IDetect')\n # print('m', m) # m IDetect\n m.cuda()\n s = 256 # 2x min stride\n # m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]).cuda() # forward\n # print('m.device2', m.device)\n check_anchor_order(m)\n # print('m.device3', m.device)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IAuxDetect):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]]) # forward\n #print(m.stride)\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_aux_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IBin):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases_bin() # only run once\n # 
print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IKeypoint):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases_kpt() # only run once\n # print('Strides: %s' % m.stride.tolist())\n\n # Init weights, biases\n initialize_weights(self)\n self.info()\n logger.info('')\n\n def forward(self, x, augment=False, profile=False):\n # print('x', x.shape)\n if augment:\n img_size = x.shape[-2:] # height, width\n s = [1, 0.83, 0.67] # scales\n f = [None, 3, None] # flips (2-ud, 3-lr)\n y = [] # outputs\n for si, fi in zip(s, f):\n xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))\n yi = self.forward_once(xi)[0] # forward\n # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save\n yi[..., :4] /= si # de-scale\n if fi == 2:\n yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud\n elif fi == 3:\n yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr\n y.append(yi)\n # print('y', y.shape)\n return torch.cat(y, 1), None # augmented inference, train\n else:\n return self.forward_once(x, profile) # single-scale inference, train\n\n def forward_once(self, x, profile=False):\n # print('x1', x.shape)\n y, dt = [], [] # outputs\n for m in self.model:\n if m.f != -1: # if not from previous layer\n x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers\n\n if not hasattr(self, 'traced'):\n self.traced=False\n\n if self.traced:\n if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect) or isinstance(m, IKeypoint):\n break\n\n # print('profile', profile) # Flase\n if profile:\n c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin))\n o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS\n # print('o', o.shape)\n for _ in range(10):\n m(x.copy() if c else x)\n t = time_synchronized()\n for _ in range(10):\n m(x.copy() if c else x)\n dt.append((time_synchronized() - t) * 100)\n print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))\n\n # print('x3', x.shape)\n # print('m.i', m.i) # =len(y)\n x = m(x) # run\\\n \n y.append(x if m.i in self.save else None) # save output\n # print('x4', x.shape)\n\n if profile:\n print('%.1fms total' % sum(dt))\n\n # print('x', len(x)) # 3\n return x\n\n def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _initialize_aux_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, mi2, s in zip(m.m, m.m2, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) 
** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n b2 = mi2.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b2.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True)\n\n def _initialize_biases_bin(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Bin() module\n bc = m.bin_count\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n old = b[:, (0,1,2,bc+3)].data\n obj_idx = 2*bc+4\n b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99))\n b[:, obj_idx].data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b[:, (obj_idx+1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n b[:, (0,1,2,bc+3)].data = old\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _initialize_biases_kpt(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _print_biases(self):\n m = self.model[-1] # Detect() module\n for mi in m.m: # from\n b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)\n print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))\n\n # def _print_weights(self):\n # for m in self.model.modules():\n # if type(m) is Bottleneck:\n # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights\n\n def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers\n print('Fusing layers... ')\n for m in self.model.modules():\n if isinstance(m, RepConv):\n #print(f\" fuse_repvgg_block\")\n m.fuse_repvgg_block()\n elif isinstance(m, RepConv_OREPA):\n #print(f\" switch_to_deploy\")\n m.switch_to_deploy()\n elif type(m) is Conv and hasattr(m, 'bn'):\n m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv\n delattr(m, 'bn') # remove batchnorm\n m.forward = m.fuseforward # update forward\n elif isinstance(m, (IDetect, IAuxDetect)):\n m.fuse()\n m.forward = m.fuseforward\n self.info()\n return self\n\n def nms(self, mode=True): # add or remove NMS module\n present = type(self.model[-1]) is NMS # last layer is NMS\n if mode and not present:\n print('Adding NMS... ')\n m = NMS() # module\n m.f = -1 # from\n m.i = self.model[-1].i + 1 # index\n self.model.add_module(name='%s' % m.i, module=m) # add\n self.eval()\n elif not mode and present:\n print('Removing NMS... ')\n self.model = self.model[:-1] # remove\n return self\n\n def autoshape(self): # add autoShape module\n print('Adding autoShape... 
')\n m = autoShape(self) # wrap model\n copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes\n return m\n\n def info(self, verbose=False, img_size=640): # print model information\n model_info(self, verbose, img_size)" }, { "identifier": "check_anchors", "path": "utils/autoanchor.py", "snippet": "def check_anchors(dataset, model, thr=4.0, imgsz=640):\n # Check anchor fit to data, recompute if necessary\n prefix = colorstr('autoanchor: ')\n print(f'\\n{prefix}Analyzing anchors... ', end='')\n m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()\n shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale\n wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh\n\n def metric(k): # compute metric\n r = wh[:, None] / k[None]\n x = torch.min(r, 1. / r).min(2)[0] # ratio metric\n best = x.max(1)[0] # best_x\n aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold\n bpr = (best > 1. / thr).float().mean() # best possible recall\n return bpr, aat\n\n anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors\n bpr, aat = metric(anchors)\n print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')\n if bpr < 0.98: # threshold to recompute\n print('. Attempting to improve anchors, please wait...')\n na = m.anchor_grid.numel() // 2 # number of anchors\n try:\n anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)\n except Exception as e:\n print(f'{prefix}ERROR: {e}')\n new_bpr = metric(anchors)[0]\n if new_bpr > bpr: # replace anchors\n anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)\n m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference\n check_anchor_order(m)\n m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss\n print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')\n else:\n print(f'{prefix}Original anchors better than new anchors. 
Proceeding with original anchors.')\n print('') # newline" }, { "identifier": "create_dataloader", "path": "utils/datasets.py", "snippet": "def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,\n rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):\n # Make sure only the first process in DDP process the dataset first, and the following others can use the cache\n with torch_distributed_zero_first(rank):\n dataset = LoadImagesAndLabels(path, imgsz, batch_size,\n augment=augment, # augment images\n hyp=hyp, # augmentation hyperparameters\n rect=rect, # rectangular training\n cache_images=cache,\n single_cls=opt.single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None\n loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader\n # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()\n dataloader = loader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)\n return dataloader, dataset" }, { "identifier": "labels_to_class_weights", "path": "utils/general.py", "snippet": "def set_logging(rank=-1):\ndef init_seeds(seed=0):\ndef get_latest_run(search_dir='.'):\ndef isdocker():\ndef emojis(str=''):\ndef check_online():\ndef check_git_status():\ndef check_requirements(requirements='requirements.txt', exclude=()):\ndef check_img_size(img_size, s=32):\ndef check_imshow():\ndef check_file(file):\ndef check_dataset(dict):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\ndef clip_coords(boxes, img_shape):\ndef bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):\ndef bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9):\ndef box_iou(box1, box2):\n def box_area(box):\ndef wh_iou(wh1, wh2):\ndef box_giou(box1, box2):\n def box_area(box):\ndef box_ciou(box1, box2, eps: float = 1e-7):\n def box_area(box):\ndef box_diou(box1, box2, eps: float = 1e-7):\n def box_area(box):\ndef non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=()):\ndef non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), kpt_label=False, nc=None, nkpt=None):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):\ndef apply_classifier(x, model, img, im0):\ndef 
increment_path(path, exist_ok=True, sep=''):" }, { "identifier": "attempt_download", "path": "utils/google_utils.py", "snippet": "def attempt_download(file, repo='WongKinYiu/yolov7'):\n # Attempt file download if does not exist\n file = Path(str(file).strip().replace(\"'\", '').lower())\n\n if not file.exists():\n try:\n response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api\n assets = [x['name'] for x in response['assets']] # release assets\n tag = response['tag_name'] # i.e. 'v1.0'\n except: # fallback plan\n assets = ['yolov7.pt', 'yolov7-tiny.pt', 'yolov7x.pt', 'yolov7-d6.pt', 'yolov7-e6.pt', \n 'yolov7-e6e.pt', 'yolov7-w6.pt']\n tag = subprocess.check_output('git tag', shell=True).decode().split()[-1]\n\n name = file.name\n if name in assets:\n msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/'\n redundant = False # second download option\n try: # GitHub\n url = f'https://github.com/{repo}/releases/download/{tag}/{name}'\n print(f'Downloading {url} to {file}...')\n torch.hub.download_url_to_file(url, file)\n assert file.exists() and file.stat().st_size > 1E6 # check\n except Exception as e: # GCP\n print(f'Download error: {e}')\n assert redundant, 'No secondary mirror'\n url = f'https://storage.googleapis.com/{repo}/ckpt/{name}'\n print(f'Downloading {url} to {file}...')\n os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights)\n finally:\n if not file.exists() or file.stat().st_size < 1E6: # check\n file.unlink(missing_ok=True) # remove partial downloads\n print(f'ERROR: Download failure: {msg}')\n print('')\n return" }, { "identifier": "ComputeLoss", "path": "utils/loss.py", "snippet": "class ComputeLoss:\n # Compute losses\n def __init__(self, model, autobalance=False):\n super(ComputeLoss, self).__init__()\n device = next(model.parameters()).device # get model device\n h = model.hyp # hyperparameters\n\n # Define criteria\n BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))\n BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))\n\n # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets\n\n # Focal loss\n g = h['fl_gamma'] # focal loss gamma\n if g > 0:\n BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module\n self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7\n #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.1, .05]) # P3-P7\n #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.5, 0.4, .1]) # P3-P7\n self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index\n self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance\n for k in 'na', 'nc', 'nl', 'anchors':\n setattr(self, k, getattr(det, k))\n\n def __call__(self, p, targets): # predictions, targets, model\n device = targets.device\n lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)\n tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets\n\n # Losses\n for i, pi in enumerate(p): # layer index, layer predictions\n b, a, gj, gi = indices[i] # image, anchor, gridy, gridx\n tobj = torch.zeros_like(pi[..., 0], device=device) # target obj\n\n n = 
b.shape[0] # number of targets\n if n:\n ps = pi[b, a, gj, gi] # prediction subset corresponding to targets\n\n # Regression\n pxy = ps[:, :2].sigmoid() * 2. - 0.5\n pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]\n pbox = torch.cat((pxy, pwh), 1) # predicted box\n iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)\n lbox += (1.0 - iou).mean() # iou loss\n\n # Objectness\n tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio\n\n # Classification\n if self.nc > 1: # cls loss (only if multiple classes)\n t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets\n t[range(n), tcls[i]] = self.cp\n #t[t==self.cp] = iou.detach().clamp(0).type(t.dtype)\n lcls += self.BCEcls(ps[:, 5:], t) # BCE\n\n # Append targets to text file\n # with open('targets.txt', 'a') as file:\n # [file.write('%11.5g ' * 4 % tuple(x) + '\\n') for x in torch.cat((txy[i], twh[i]), 1)]\n\n obji = self.BCEobj(pi[..., 4], tobj)\n lobj += obji * self.balance[i] # obj loss\n if self.autobalance:\n self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n if self.autobalance:\n self.balance = [x / self.balance[self.ssi] for x in self.balance]\n lbox *= self.hyp['box']\n lobj *= self.hyp['obj']\n lcls *= self.hyp['cls']\n bs = tobj.shape[0] # batch size\n\n loss = lbox + lobj + lcls\n return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()\n\n def build_targets(self, p, targets):\n # Build targets for compute_loss(), input targets(image,class,x,y,w,h)\n na, nt = self.na, targets.shape[0] # number of anchors, targets\n tcls, tbox, indices, anch = [], [], [], []\n gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain\n ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)\n targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices\n\n g = 0.5 # bias\n off = torch.tensor([[0, 0],\n [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m\n # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm\n ], device=targets.device).float() * g # offsets\n\n for i in range(self.nl):\n anchors = self.anchors[i]\n gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain\n\n # Match targets to anchors\n t = targets * gain\n if nt:\n # Matches\n r = t[:, :, 4:6] / anchors[:, None] # wh ratio\n j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare\n # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n t = t[j] # filter\n\n # Offsets\n gxy = t[:, 2:4] # grid xy\n gxi = gain[[2, 3]] - gxy # inverse\n j, k = ((gxy % 1. < g) & (gxy > 1.)).T\n l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T\n j = torch.stack((torch.ones_like(j), j, k, l, m))\n t = t.repeat((5, 1, 1))[j]\n offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n else:\n t = targets[0]\n offsets = 0\n\n # Define\n b, c = t[:, :2].long().T # image, class\n gxy = t[:, 2:4] # grid xy\n gwh = t[:, 4:6] # grid wh\n gij = (gxy - offsets).long()\n gi, gj = gij.T # grid xy indices\n\n # Append\n a = t[:, 6].long() # anchor indices\n indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices\n tbox.append(torch.cat((gxy - gij, gwh), 1)) # box\n anch.append(anchors[a]) # anchors\n tcls.append(c) # class\n\n return tcls, tbox, indices, anch" }, { "identifier": "ComputeLossOTA", "path": "utils/loss.py", "snippet": "class ComputeLossOTA:\n # Compute losses\n def __init__(self, model, autobalance=False):\n super(ComputeLossOTA, self).__init__()\n device = next(model.parameters()).device # get model device\n h = model.hyp # hyperparameters\n\n # Define criteria\n BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))\n BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))\n\n # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets\n\n # Focal loss\n g = h['fl_gamma'] # focal loss gamma\n if g > 0:\n BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module\n self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7\n self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index\n self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance\n for k in 'na', 'nc', 'nl', 'anchors', 'stride':\n setattr(self, k, getattr(det, k))\n\n def __call__(self, p, targets, imgs): # predictions, targets, model \n device = targets.device\n lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)\n bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)\n pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p] \n \n\n # Losses\n for i, pi in enumerate(p): # layer index, layer predictions\n b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx\n tobj = torch.zeros_like(pi[..., 0], device=device) # target obj\n\n n = b.shape[0] # number of targets\n if n:\n ps = pi[b, a, gj, gi] # prediction subset corresponding to targets\n\n # Regression\n grid = torch.stack([gi, gj], dim=1)\n pxy = ps[:, :2].sigmoid() * 2. - 0.5\n #pxy = ps[:, :2].sigmoid() * 3. 
- 1.\n pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]\n pbox = torch.cat((pxy, pwh), 1) # predicted box\n selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]\n selected_tbox[:, :2] -= grid\n iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target)\n lbox += (1.0 - iou).mean() # iou loss\n\n # Objectness\n tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio\n\n # Classification\n selected_tcls = targets[i][:, 1].long()\n if self.nc > 1: # cls loss (only if multiple classes)\n t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets\n t[range(n), selected_tcls] = self.cp\n lcls += self.BCEcls(ps[:, 5:], t) # BCE\n\n # Append targets to text file\n # with open('targets.txt', 'a') as file:\n # [file.write('%11.5g ' * 4 % tuple(x) + '\\n') for x in torch.cat((txy[i], twh[i]), 1)]\n\n obji = self.BCEobj(pi[..., 4], tobj)\n lobj += obji * self.balance[i] # obj loss\n if self.autobalance:\n self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n if self.autobalance:\n self.balance = [x / self.balance[self.ssi] for x in self.balance]\n lbox *= self.hyp['box']\n lobj *= self.hyp['obj']\n lcls *= self.hyp['cls']\n bs = tobj.shape[0] # batch size\n\n loss = lbox + lobj + lcls\n return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()\n\n def build_targets(self, p, targets, imgs):\n \n #indices, anch = self.find_positive(p, targets)\n indices, anch = self.find_3_positive(p, targets)\n #indices, anch = self.find_4_positive(p, targets)\n #indices, anch = self.find_5_positive(p, targets)\n #indices, anch = self.find_9_positive(p, targets)\n device = torch.device(targets.device)\n matching_bs = [[] for pp in p]\n matching_as = [[] for pp in p]\n matching_gjs = [[] for pp in p]\n matching_gis = [[] for pp in p]\n matching_targets = [[] for pp in p]\n matching_anchs = [[] for pp in p]\n \n nl = len(p) \n \n for batch_idx in range(p[0].shape[0]):\n \n b_idx = targets[:, 0]==batch_idx\n this_target = targets[b_idx]\n if this_target.shape[0] == 0:\n continue\n \n txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]\n txyxy = xywh2xyxy(txywh)\n\n pxyxys = []\n p_cls = []\n p_obj = []\n from_which_layer = []\n all_b = []\n all_a = []\n all_gj = []\n all_gi = []\n all_anch = []\n \n for i, pi in enumerate(p):\n \n b, a, gj, gi = indices[i]\n idx = (b == batch_idx)\n b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] \n all_b.append(b)\n all_a.append(a)\n all_gj.append(gj)\n all_gi.append(gi)\n all_anch.append(anch[i][idx])\n from_which_layer.append((torch.ones(size=(len(b),)) * i).to(device))\n \n fg_pred = pi[b, a, gj, gi] \n p_obj.append(fg_pred[:, 4:5])\n p_cls.append(fg_pred[:, 5:])\n \n grid = torch.stack([gi, gj], dim=1)\n pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8.\n #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. 
+ grid) * self.stride[i]\n pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8.\n pxywh = torch.cat([pxy, pwh], dim=-1)\n pxyxy = xywh2xyxy(pxywh)\n pxyxys.append(pxyxy)\n \n pxyxys = torch.cat(pxyxys, dim=0)\n if pxyxys.shape[0] == 0:\n continue\n p_obj = torch.cat(p_obj, dim=0)\n p_cls = torch.cat(p_cls, dim=0)\n from_which_layer = torch.cat(from_which_layer, dim=0)\n all_b = torch.cat(all_b, dim=0)\n all_a = torch.cat(all_a, dim=0)\n all_gj = torch.cat(all_gj, dim=0)\n all_gi = torch.cat(all_gi, dim=0)\n all_anch = torch.cat(all_anch, dim=0)\n \n pair_wise_iou = box_iou(txyxy, pxyxys)\n\n pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)\n\n top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)\n dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)\n\n gt_cls_per_image = (\n F.one_hot(this_target[:, 1].to(torch.int64), self.nc)\n .float()\n .unsqueeze(1)\n .repeat(1, pxyxys.shape[0], 1)\n )\n\n num_gt = this_target.shape[0]\n cls_preds_ = (\n p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()\n * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()\n )\n\n y = cls_preds_.sqrt_()\n pair_wise_cls_loss = F.binary_cross_entropy_with_logits(\n torch.log(y/(1-y)) , gt_cls_per_image, reduction=\"none\"\n ).sum(-1)\n del cls_preds_\n \n cost = (\n pair_wise_cls_loss\n + 3.0 * pair_wise_iou_loss\n )\n\n matching_matrix = torch.zeros_like(cost, device=device)\n\n for gt_idx in range(num_gt):\n _, pos_idx = torch.topk(\n cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False\n )\n matching_matrix[gt_idx][pos_idx] = 1.0\n\n del top_k, dynamic_ks\n anchor_matching_gt = matching_matrix.sum(0)\n if (anchor_matching_gt > 1).sum() > 0:\n _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)\n matching_matrix[:, anchor_matching_gt > 1] *= 0.0\n matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0\n fg_mask_inboxes = (matching_matrix.sum(0) > 0.0).to(device)\n matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)\n \n from_which_layer = from_which_layer[fg_mask_inboxes]\n all_b = all_b[fg_mask_inboxes]\n all_a = all_a[fg_mask_inboxes]\n all_gj = all_gj[fg_mask_inboxes]\n all_gi = all_gi[fg_mask_inboxes]\n all_anch = all_anch[fg_mask_inboxes]\n \n this_target = this_target[matched_gt_inds]\n \n for i in range(nl):\n layer_idx = from_which_layer == i\n matching_bs[i].append(all_b[layer_idx])\n matching_as[i].append(all_a[layer_idx])\n matching_gjs[i].append(all_gj[layer_idx])\n matching_gis[i].append(all_gi[layer_idx])\n matching_targets[i].append(this_target[layer_idx])\n matching_anchs[i].append(all_anch[layer_idx])\n\n for i in range(nl):\n if matching_targets[i] != []:\n matching_bs[i] = torch.cat(matching_bs[i], dim=0)\n matching_as[i] = torch.cat(matching_as[i], dim=0)\n matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)\n matching_gis[i] = torch.cat(matching_gis[i], dim=0)\n matching_targets[i] = torch.cat(matching_targets[i], dim=0)\n matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)\n else:\n matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n\n return matching_bs, matching_as, matching_gjs, matching_gis, 
matching_targets, matching_anchs \n\n def find_3_positive(self, p, targets):\n # Build targets for compute_loss(), input targets(image,class,x,y,w,h)\n na, nt = self.na, targets.shape[0] # number of anchors, targets\n indices, anch = [], []\n gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain\n ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)\n targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices\n\n g = 0.5 # bias\n off = torch.tensor([[0, 0],\n [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m\n # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm\n ], device=targets.device).float() * g # offsets\n\n for i in range(self.nl):\n anchors = self.anchors[i]\n gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain\n\n # Match targets to anchors\n t = targets * gain\n if nt:\n # Matches\n r = t[:, :, 4:6] / anchors[:, None] # wh ratio\n j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare\n # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n t = t[j] # filter\n\n # Offsets\n gxy = t[:, 2:4] # grid xy\n gxi = gain[[2, 3]] - gxy # inverse\n j, k = ((gxy % 1. < g) & (gxy > 1.)).T\n l, m = ((gxi % 1. < g) & (gxi > 1.)).T\n j = torch.stack((torch.ones_like(j), j, k, l, m))\n t = t.repeat((5, 1, 1))[j]\n offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n else:\n t = targets[0]\n offsets = 0\n\n # Define\n b, c = t[:, :2].long().T # image, class\n gxy = t[:, 2:4] # grid xy\n gwh = t[:, 4:6] # grid wh\n gij = (gxy - offsets).long()\n gi, gj = gij.T # grid xy indices\n\n # Append\n a = t[:, 6].long() # anchor indices\n indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices\n anch.append(anchors[a]) # anchors\n\n return indices, anch" }, { "identifier": "plot_images", "path": "utils/plots.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n colors = color_list() # list of colors\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i // ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if 
normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors[cls % len(colors)]\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic" }, { "identifier": "plot_labels", "path": "utils/plots.py", "snippet": "def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):\n # plot dataset labels\n print('Plotting labels... ')\n c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes\n nc = int(c.max() + 1) # number of classes\n colors = color_list()\n x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])\n\n # seaborn correlogram\n sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))\n plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)\n plt.close()\n\n # matplotlib labels\n matplotlib.use('svg') # faster\n ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()\n ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)\n ax[0].set_ylabel('instances')\n if 0 < len(names) < 30:\n ax[0].set_xticks(range(len(names)))\n ax[0].set_xticklabels(names, rotation=90, fontsize=10)\n else:\n ax[0].set_xlabel('classes')\n sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)\n sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)\n\n # rectangles\n labels[:, 1:3] = 0.5 # center\n labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000\n img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)\n for cls, *box in labels[:1000]:\n ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot\n ax[1].imshow(img)\n ax[1].axis('off')\n\n for a in [0, 1, 2, 3]:\n for s in ['top', 'right', 'left', 'bottom']:\n ax[a].spines[s].set_visible(False)\n\n plt.savefig(save_dir / 'labels.jpg', dpi=200)\n matplotlib.use('Agg')\n plt.close()\n\n # loggers\n for k, v in loggers.items() or {}:\n if k == 'wandb' and v:\n v.log({\"Labels\": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)" }, { "identifier": "plot_results", "path": "utils/plots.py", "snippet": "def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):\n # Plot training 'results*.txt'. 
from utils.plots import *; plot_results(save_dir='runs/train/exp')\n fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)\n ax = ax.ravel()\n s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',\n 'val Box', 'val Objectness', 'val Classification', '[email protected]', '[email protected]:0.95']\n if bucket:\n # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]\n files = ['results%g.txt' % x for x in id]\n c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)\n os.system(c)\n else:\n files = list(Path(save_dir).glob('results*.txt'))\n assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)\n for fi, f in enumerate(files):\n try:\n results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T\n n = results.shape[1] # number of rows\n x = range(start, min(stop, n) if stop else n)\n for i in range(10):\n y = results[i, x]\n if i in [0, 1, 2, 5, 6, 7]:\n y[y == 0] = np.nan # don't show zero loss values\n # y /= y[0] # normalize\n label = labels[fi] if len(labels) else f.stem\n ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n # if i in [5, 6, 7]: # share train and val loss y axes\n # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])\n except Exception as e:\n print('Warning: Plotting error for %s; %s' % (f, e))\n\n ax[1].legend()\n fig.savefig(Path(save_dir) / 'results.png', dpi=200)" }, { "identifier": "plot_evolution", "path": "utils/plots.py", "snippet": "def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()\n # Plot hyperparameter evolution results in evolve.txt\n with open(yaml_file) as f:\n hyp = yaml.load(f, Loader=yaml.SafeLoader)\n x = np.loadtxt('evolve.txt', ndmin=2)\n f = fitness(x)\n # weights = (f - f.min()) ** 2 # for weighted results\n plt.figure(figsize=(10, 12), tight_layout=True)\n matplotlib.rc('font', **{'size': 8})\n for i, (k, v) in enumerate(hyp.items()):\n y = x[:, i + 7]\n # mu = (y * weights).sum() / weights.sum() # best weighted result\n mu = y[f.argmax()] # best single result\n plt.subplot(6, 5, i + 1)\n plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')\n plt.plot(mu, f.max(), 'k+', markersize=15)\n plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters\n if i % 5 != 0:\n plt.yticks([])\n print('%15s: %.3g' % (k, mu))\n plt.savefig('evolve.png', dpi=200)\n print('\\nPlot saved as evolve.png')" }, { "identifier": "ModelEMA", "path": "utils/torch_utils.py", "snippet": "class ModelEMA:\n \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n Keep a moving average of everything in the model state_dict (parameters and buffers).\n This is intended to allow functionality like\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n A smoothed version of the weights is necessary for some training schemes to perform well.\n This class is sensitive where it is initialized in the sequence of model init,\n GPU assignment and distributed training wrappers.\n \"\"\"\n\n def __init__(self, model, decay=0.9999, updates=0):\n # Create EMA\n self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA\n # if next(model.parameters()).device.type != 'cpu':\n # self.ema.half() # FP16 EMA\n self.updates = updates # number of EMA updates\n self.decay = lambda x: decay * (1 - 
math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n # Update EMA parameters\n with torch.no_grad():\n self.updates += 1\n d = self.decay(self.updates)\n\n msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict\n for k, v in self.ema.state_dict().items():\n if v.dtype.is_floating_point:\n v *= d\n v += (1. - d) * msd[k].detach()\n\n def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):\n # Update EMA attributes\n copy_attr(self.ema, model, include, exclude)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n cpu = device.lower() == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n n = torch.cuda.device_count()\n if n > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * len(s)\n for i, d in enumerate(device.split(',') if device else range(n)):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "intersect_dicts", "path": "utils/torch_utils.py", "snippet": "def intersect_dicts(da, db, exclude=()):\n # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values\n return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}" }, { "identifier": "torch_distributed_zero_first", "path": "utils/torch_utils.py", "snippet": "@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n \"\"\"\n Decorator to make all processes in distributed training wait for each local_master to do something.\n \"\"\"\n if local_rank not in [-1, 0]:\n torch.distributed.barrier()\n yield\n if local_rank == 0:\n torch.distributed.barrier()" }, { "identifier": "is_parallel", "path": "utils/torch_utils.py", "snippet": "def is_parallel(model):\n return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)" }, { "identifier": "getMask", "path": "utils/distill_utils.py", "snippet": "def getMask(batch_size, gt_boxes, img_size, feat, anchors, max_num_box, device):\r\n # [b, K, 4]\r\n gt_boxes = make_gt_boxes(gt_boxes, max_num_box, batch_size, img_size)\r\n feat_stride = img_size[0] / feat.size(2)\r\n anchors = torch.from_numpy(generate_anchors(feat_stride, anchors))\r\n feat = feat.cpu()\r\n height, width = feat.size(2), feat.size(3)\r\n feat_height, feat_width = feat.size(2), feat.size(3)\r\n shift_x = np.arange(0, feat_width) * feat_stride\r\n shift_y = np.arange(0, feat_height) * feat_stride\r\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\r\n shifts = 
torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(),\r\n shift_x.ravel(), shift_y.ravel())).transpose())\r\n shifts = shifts.contiguous().type_as(feat).float()\r\n\r\n # num of anchors [3]\r\n A = anchors.size(0)\r\n K = shifts.size(0)\r\n\r\n anchors = anchors.type_as(gt_boxes)\r\n # all_anchors [K, A, 4]\r\n all_anchors = anchors.view(1, A, 4) + shifts.view(K, 1, 4)\r\n all_anchors = all_anchors.view(K * A, 4)\r\n # compute iou [all_anchors, gt_boxes]\r\n IOU_map = bbox_overlaps_batch(all_anchors, gt_boxes, img_size).view(batch_size, height, width, A, gt_boxes.shape[1])\r\n\r\n mask_batch = []\r\n for i in range(batch_size):\r\n max_iou, _ = torch.max(IOU_map[i].view(height * width * A, gt_boxes.shape[1]), dim=0)\r\n mask_per_im = torch.zeros([height, width], dtype=torch.int64).to(device)\r\n for k in range(gt_boxes.shape[1]):\r\n if torch.sum(gt_boxes[i][k]) == 0:\r\n break\r\n max_iou_per_gt = max_iou[k] * 0.5\r\n mask_per_gt = torch.sum(IOU_map[i][:, :, :, k] > max_iou_per_gt, dim=2)\r\n mask_per_im += mask_per_gt.to(device)\r\n mask_batch.append(mask_per_im)\r\n return mask_batch\r" }, { "identifier": "compute_mask_loss", "path": "utils/distill_utils.py", "snippet": "def compute_mask_loss(mask_batch, student_feature, teacher_feature, imitation_loss_weight):\r\n mask_list = []\r\n for mask in mask_batch:\r\n mask = (mask > 0).float().unsqueeze(0)\r\n mask_list.append(mask)\r\n mask_batch = torch.stack(mask_list, dim=0)\r\n norms = mask_batch.sum() * 2\r\n mask_batch_s = mask_batch.unsqueeze(4)\r\n no = student_feature.size(-1)\r\n bs, na, height, width, _ = mask_batch_s.shape\r\n mask_batch_no = mask_batch_s.expand((bs, na, height, width, no))\r\n sup_loss = (torch.pow(teacher_feature - student_feature, 2) * mask_batch_no).sum() / norms\r\n sup_loss = sup_loss * imitation_loss_weight\r\n return sup_loss\r" } ]
import argparse
import logging
import math
import os
import random
import time
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
import test  # import test.py to get mAP after each epoch
from copy import deepcopy
from pathlib import Path
from threading import Thread
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from models.experimental import attempt_load
from models.experimental import attempt_loadv5
from models.experimental import attempt_load_zxy
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
    fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
    check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss, ComputeLossOTA
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
from utils.distill_utils import getMask, compute_mask_loss
20,427
    # load teacher model
    teacher = attempt_load_zxy(opt.teacher_weights, device=device)
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']

    # Freeze
    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay
        if hasattr(v, 'im'):
            if hasattr(v.im, 'implicit'):
                pg0.append(v.im.implicit)
            else:
                for iv in v.im:
                    pg0.append(iv.implicit)
        if hasattr(v, 'imc'):
            if hasattr(v.imc, 'implicit'):
                pg0.append(v.imc.implicit)
            else:
                for iv in v.imc:
                    pg0.append(iv.implicit)
        if hasattr(v, 'imb'):
            if hasattr(v.imb, 'implicit'):
                pg0.append(v.imb.implicit)
            else:
                for iv in v.imb:
                    pg0.append(iv.implicit)
        if hasattr(v, 'imo'):
            if hasattr(v.imo, 'implicit'):
                pg0.append(v.imo.implicit)
            else:
                for iv in v.imo:
                    pg0.append(iv.implicit)
        if hasattr(v, 'ia'):
            if hasattr(v.ia, 'implicit'):
                pg0.append(v.ia.implicit)
            else:
                for iv in v.ia:
                    pg0.append(iv.implicit)
        if hasattr(v, 'attn'):
            if hasattr(v.attn, 'logit_scale'):
                pg0.append(v.attn.logit_scale)
            if hasattr(v.attn, 'q_bias'):
                pg0.append(v.attn.q_bias)
            if hasattr(v.attn, 'v_bias'):
                pg0.append(v.attn.v_bias)
            if hasattr(v.attn, 'relative_position_bias_table'):
                pg0.append(v.attn.relative_position_bias_table)
        if hasattr(v, 'rbr_dense'):
            if hasattr(v.rbr_dense, 'weight_rbr_origin'):
                pg0.append(v.rbr_dense.weight_rbr_origin)
            if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'):
                pg0.append(v.rbr_dense.weight_rbr_avg_conv)
            if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'):
                pg0.append(v.rbr_dense.weight_rbr_pfir_conv)
            if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'):
                pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1)
            if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'):
                pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2)
            if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'):
                pg0.append(v.rbr_dense.weight_rbr_gconv_dw)
            if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'):
                pg0.append(v.rbr_dense.weight_rbr_gconv_pw)
            if hasattr(v.rbr_dense, 'vector'):
                pg0.append(v.rbr_dense.vector)

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
logger = logging.getLogger(__name__)


def train(hyp, opt, device, tb_writer=None):
    logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \
        Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze

    # Directories
    wdir = save_dir / 'weights'
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / 'last.pt'
    best = wdir / 'best.pt'
    results_file = save_dir / 'results.txt'

    # Save run settings
    with open(save_dir / 'hyp.yaml', 'w') as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / 'opt.yaml', 'w') as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != 'cpu'
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
    is_coco = opt.data.endswith('coco.yaml')

    # Logging- Doing this before checking the dataset. Might update data_dict
    loggers = {'wandb': None}  # loggers dict
    if rank in [-1, 0]:
        opt.hyp = hyp  # add hyperparameters
        run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(
            weights) else None
        wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
        loggers['wandb'] = wandb_logger.wandb
        data_dict = wandb_logger.data_dict
        if wandb_logger.wandb:
            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming

    nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
    names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
    assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith('.pt')
    # load teacher model
    teacher = attempt_load_zxy(opt.teacher_weights, device=device)
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
        exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
        state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict['train']
    test_path = data_dict['val']

    # Freeze
    freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print('freezing %s' % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay
        if hasattr(v, 'im'):
            if hasattr(v.im, 'implicit'):
                pg0.append(v.im.implicit)
            else:
                for iv in v.im:
                    pg0.append(iv.implicit)
        if hasattr(v, 'imc'):
            if hasattr(v.imc, 'implicit'):
                pg0.append(v.imc.implicit)
            else:
                for iv in v.imc:
                    pg0.append(iv.implicit)
        if hasattr(v, 'imb'):
            if hasattr(v.imb, 'implicit'):
                pg0.append(v.imb.implicit)
            else:
                for iv in v.imb:
                    pg0.append(iv.implicit)
        if hasattr(v, 'imo'):
            if hasattr(v.imo, 'implicit'):
                pg0.append(v.imo.implicit)
            else:
                for iv in v.imo:
                    pg0.append(iv.implicit)
        if hasattr(v, 'ia'):
            if hasattr(v.ia, 'implicit'):
                pg0.append(v.ia.implicit)
            else:
                for iv in v.ia:
                    pg0.append(iv.implicit)
        if hasattr(v, 'attn'):
            if hasattr(v.attn, 'logit_scale'):
                pg0.append(v.attn.logit_scale)
            if hasattr(v.attn, 'q_bias'):
                pg0.append(v.attn.q_bias)
            if hasattr(v.attn, 'v_bias'):
                pg0.append(v.attn.v_bias)
            if hasattr(v.attn, 'relative_position_bias_table'):
                pg0.append(v.attn.relative_position_bias_table)
        if hasattr(v, 'rbr_dense'):
            if hasattr(v.rbr_dense, 'weight_rbr_origin'):
                pg0.append(v.rbr_dense.weight_rbr_origin)
            if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'):
                pg0.append(v.rbr_dense.weight_rbr_avg_conv)
            if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'):
                pg0.append(v.rbr_dense.weight_rbr_pfir_conv)
            if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'):
                pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1)
            if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'):
                pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2)
            if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'):
                pg0.append(v.rbr_dense.weight_rbr_gconv_dw)
            if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'):
                pg0.append(v.rbr_dense.weight_rbr_gconv_pw)
            if hasattr(v.rbr_dense, 'vector'):
                pg0.append(v.rbr_dense.vector)

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)

    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
    else:
        lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    # plot_lr_scheduler(optimizer, scheduler, epochs)

    # EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
14
2023-10-08 13:05:58+00:00
24k
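The train() excerpt stored in the row above freezes selected layers by parameter name and then splits the model's parameters into three optimizer groups: pg0 (BatchNorm weights and other no-decay parameters), pg1 (weights that receive weight decay) and pg2 (biases). The snippet below is a minimal, self-contained sketch of that grouping pattern only; the toy model and the hyperparameter values are placeholders for illustration and are not taken from the repository in this row.

# Minimal sketch of the three-way parameter-group split used in the excerpt above.
# The model and the learning-rate / decay values are assumed placeholders.
import torch.nn as nn
import torch.optim as optim

model = nn.Sequential(
    nn.Conv2d(3, 8, 3, padding=1),
    nn.BatchNorm2d(8),
    nn.ReLU(),
    nn.Conv2d(8, 8, 3, padding=1),
)

pg0, pg1, pg2 = [], [], []  # no-decay weights, decayed weights, biases
for v in model.modules():
    if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
        pg2.append(v.bias)                    # biases: no decay
    if isinstance(v, nn.BatchNorm2d):
        pg0.append(v.weight)                  # BatchNorm weights: no decay
    elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
        pg1.append(v.weight)                  # conv/linear weights: decayed

optimizer = optim.SGD(pg0, lr=0.01, momentum=0.937, nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': 5e-4})
optimizer.add_param_group({'params': pg2})
print('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))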
falesiani/torch_ga
tests/test_keras.py
[ { "identifier": "GeometricProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricProductDense(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric multiplication instead of standard\n multiplication.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n units: int,\n blade_indices_kernel: List[int],\n blade_indices_bias: Union[None, List[int]] = None,\n activation='None',\n use_bias=True,\n **kwargs\n ):\n super().__init__(algebra=algebra, **kwargs)\n\n self.units = units\n self.blade_indices_kernel = torch.tensor(blade_indices_kernel, dtype=torch.int64)\n if use_bias: self.blade_indices_bias = torch.tensor(blade_indices_bias, dtype=torch.int64)\n # self.blade_indices_kernel = blade_indices_kernel.to(dtype=torch.int64)\n # if use_bias: self.blade_indices_bias = blade_indices_bias.to(dtype=torch.int64) \n\n self.activation = activation\n self.use_bias = use_bias\n self.activation_fn = activations.get(activation)\n self.built = False\n\n def build(self, input_shape: list):\n if False: print(f\"input_shape={input_shape}\")\n self.num_input_units = input_shape[-2]\n shape_kernel = [\n self.units,\n self.num_input_units,\n int(self.blade_indices_kernel.shape[0])\n ]\n if False: print(f\"shape_kernel={shape_kernel}\")\n self.kernel = nn.Parameter(1./np.prod(shape_kernel)*torch.randn(size=shape_kernel)).to(dtype=torch.float)\n if self.use_bias:\n shape_bias = [self.units, self.blade_indices_bias.shape[0]]\n self.bias = nn.Parameter(1./np.prod(shape_bias)*torch.randn(size=shape_bias)).to(dtype=torch.float)\n else:\n self.bias = None\n self.built = True\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-2], self.units, self.algebra.num_blades]\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel)\n\n # Perform a matrix-multiply, but using geometric product instead of\n # standard multiplication. 
To do this we do the geometric product\n # elementwise and then sum over the common axis.\n # [..., 1, I, X] * [..., O, I, X] -> [..., O, I, X] -> [..., O, X]\n # inputs_expanded = tf.expand_dims(inputs, axis=inputs.shape.ndims - 2)\n # result = tf.reduce_sum(self.algebra.geom_prod(\n # inputs_expanded, w_geom), axis=-2)\n\n inputs_expanded = inputs.unsqueeze(len(inputs.shape) - 2)\n result = self.algebra.geom_prod(inputs_expanded, w_geom).sum(dim=-2)\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias)\n result += b_geom\n if self.activation_fn:\n result = self.activation_fn(result)\n return result\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices_kernel\":\n self.blade_indices_kernel.cpu().detach().numpy(),\n \"blade_indices_bias\":\n self.blade_indices_bias.cpu().detach().numpy(),\n \"units\":\n self.units,\n # \"activation\":\n # activations.serialize(self.activation),\n \"use_bias\":\n self.use_bias,\n })\n return config" }, { "identifier": "GeometricSandwichProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricSandwichProductDense(GeometricProductDense):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric sandwich multiplication instead of\n standard multiplication.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self, algebra, units, blade_indices_kernel, blade_indices_bias=None,\n activation=None, use_bias=True, \n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\", kernel_regularizer=None,\n # bias_regularizer=None, activity_regularizer=None,\n # kernel_constraint=None, bias_constraint=None, \n **kwargs\n ):\n super().__init__(\n algebra, units,\n blade_indices_kernel,\n blade_indices_bias=blade_indices_bias,\n activation=activation,\n use_bias=use_bias,\n # kernel_initializer=kernel_initializer,\n # bias_initializer=bias_initializer,\n # kernel_regularizer=kernel_regularizer,\n # bias_regularizer=bias_regularizer,\n # activity_regularizer=activity_regularizer,\n # kernel_constraint=kernel_constraint,\n # bias_constraint=bias_constraint, \n **kwargs\n )\n self.built = False\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel)\n\n # Same as GeometricProductDense but using R*x*~R instead of just R*x\n # inputs_expanded = tf.expand_dims(inputs, axis=inputs.shape.ndims - 2)\n # result = tf.reduce_sum(\n # self.algebra.geom_prod(\n # w_geom,\n # self.algebra.geom_prod(\n # inputs_expanded,\n # self.algebra.reversion(w_geom)\n # )\n # ),\n # axis=-2\n # )\n # if self.bias is not None:\n # b_geom = self.algebra.from_tensor(\n # self.bias, self.blade_indices_bias)\n # result += b_geom\n\n # return self.activation(result)\n\n inputs_expanded = inputs.unsqueeze(len(inputs.shape) - 2)\n result = self.algebra.geom_prod( w_geom, self.algebra.geom_prod(inputs_expanded, self.algebra.reversion(w_geom))).sum(dim=-2)\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias)\n result += b_geom\n if self.activation_fn:\n result = self.activation_fn(result)\n return result" }, { "identifier": "GeometricProductElementwise", "path": "torch_ga/layers.py", "snippet": "class 
GeometricProductElementwise(GeometricAlgebraLayer):\n \"\"\"Performs the elementwise geometric product with a list of multivectors\n with as many elements as there are input units.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n blade_indices_kernel: List[int],\n blade_indices_bias: Union[None, List[int]] = None,\n activation=None,\n use_bias=True,\n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\",\n # kernel_regularizer=None,\n # bias_regularizer=None,\n # activity_regularizer=None,\n # kernel_constraint=None,\n # bias_constraint=None,\n **kwargs\n ):\n # super().__init__(algebra=algebra, activity_regularizer=activity_regularizer, **kwargs)\n super().__init__(algebra=algebra, **kwargs)\n\n self.blade_indices_kernel = torch.tensor(blade_indices_kernel, dtype=torch.int64)\n if use_bias:\n self.blade_indices_bias = torch.tensor(blade_indices_bias, dtype=torch.int64)\n \n # self.blade_indices_kernel = blade_indices_kernel.to(dtype=torch.int64)\n # if use_bias:\n # self.blade_indices_bias = blade_indices_bias.to(dtype=torch.int64)\n\n self.activation_fn = activations.get(activation)\n self.use_bias = use_bias\n # self.kernel_initializer = initializers.get(kernel_initializer)\n # self.bias_initializer = initializers.get(bias_initializer)\n # self.kernel_regularizer = regularizers.get(kernel_regularizer)\n # self.bias_regularizer = regularizers.get(bias_regularizer)\n # self.kernel_constraint = constraints.get(kernel_constraint)\n # self.bias_constraint = constraints.get(bias_constraint)\n self.built = False\n\n def build(self, input_shape: torch.Size):\n self.num_input_units = input_shape[-2]\n shape_kernel = [\n self.num_input_units,\n self.blade_indices_kernel.shape[0]\n ]\n self.kernel = nn.Parameter(1./np.prod(shape_kernel)*torch.randn(shape_kernel)).to(dtype=torch.float)\n if self.use_bias:\n shape_bias = [self.num_input_units,self.blade_indices_bias.shape[0]]\n self.bias = nn.Parameter(1./np.prod(shape_bias)*torch.randn(shape_bias)).to(dtype=torch.float)\n else:\n self.bias = None\n\n # self.kernel = self.add_weight(\n # \"kernel\",\n # shape=shape_kernel,\n # initializer=self.kernel_initializer,\n # regularizer=self.kernel_regularizer,\n # constraint=self.kernel_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # if self.use_bias:\n # shape_bias = [self.num_input_units,\n # self.blade_indices_bias.shape[0]]\n # self.bias = self.add_weight(\n # \"bias\",\n # shape=shape_bias,\n # initializer=self.bias_initializer,\n # regularizer=self.bias_regularizer,\n # constraint=self.bias_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # else:\n # self.bias = None\n self.built = True\n\n def compute_output_shape(self, input_shape):\n return torch.Size([*input_shape[:-1], self.algebra.num_blades])\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor(\n self.kernel, self.blade_indices_kernel)\n\n # Elementwise multiplication for each unit with a multivector.\n # [..., U, X] * [U, X] -> [..., U, X]\n result = self.algebra.geom_prod(inputs, w_geom)\n\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(\n self.bias, self.blade_indices_bias)\n result += b_geom\n\n if self.activation_fn:\n result = self.activation_fn(result)\n return result\n\n def 
get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices_kernel\":\n self.blade_indices_kernel.cpu().detach().numpy(),\n \"blade_indices_bias\":\n self.blade_indices_bias.cpu().detach().numpy(),\n # \"activation\":\n # self.activation,\n # activations.serialize(self.activation),\n \"use_bias\":\n self.use_bias,\n # \"kernel_initializer\":\n # initializers.serialize(self.kernel_initializer),\n # \"bias_initializer\":\n # initializers.serialize(self.bias_initializer),\n # \"kernel_regularizer\":\n # regularizers.serialize(self.kernel_regularizer),\n # \"bias_regularizer\":\n # regularizers.serialize(self.bias_regularizer),\n # \"activity_regularizer\":\n # regularizers.serialize(self.activity_regularizer),\n # \"kernel_constraint\":\n # constraints.serialize(self.kernel_constraint),\n # \"bias_constraint\":\n # constraints.serialize(self.bias_constraint)\n })\n return config" }, { "identifier": "GeometricSandwichProductElementwise", "path": "torch_ga/layers.py", "snippet": "class GeometricSandwichProductElementwise(GeometricProductElementwise):\n \"\"\"Performs the elementwise geometric sandwich product with a list of\n multivectors with as many elements as there are input units.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self, algebra, blade_indices_kernel, blade_indices_bias=None,\n activation=None, use_bias=True, \n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\", kernel_regularizer=None,\n # bias_regularizer=None, activity_regularizer=None,\n # kernel_constraint=None, bias_constraint=None, \n **kwargs\n ):\n super().__init__(\n algebra,\n blade_indices_kernel,\n blade_indices_bias=blade_indices_bias,\n activation=activation,\n use_bias=use_bias,\n # kernel_initializer=kernel_initializer,\n # bias_initializer=bias_initializer,\n # kernel_regularizer=kernel_regularizer,\n # bias_regularizer=bias_regularizer,\n # activity_regularizer=activity_regularizer,\n # kernel_constraint=kernel_constraint,\n # bias_constraint=bias_constraint, \n **kwargs\n )\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor( self.kernel, self.blade_indices_kernel)\n\n # Elementwise multiplication Rx~R for each unit with a multivector.\n # [..., U, X] * [U, X] -> [..., U, X]\n result = self.algebra.geom_prod(\n w_geom,\n self.algebra.geom_prod(\n inputs,\n self.algebra.reversion(w_geom)\n )\n )\n\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(\n self.bias, self.blade_indices_bias)\n result += b_geom\n\n if self.activation_fn:\n result = self.activation_fn(result)\n return result" }, { "identifier": "GeometricProductConv1D", "path": "torch_ga/layers.py", "snippet": "class GeometricProductConv1D(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Conv1D layer but using multivector-valued kernels\n instead of scalar ones and geometric product instead of\n standard multiplication.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n filters: How many channels the output will have\n kernel_size: Size for the convolution kernel\n stride: Stride to use for the convolution\n padding: \"SAME\" (zero-pad input length so output\n length == input length / stride) or \"VALID\" (no padding)\n blade_indices_kernel: Blade indices to use for the kernel parameter\n 
blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n filters: int,\n kernel_size: int,\n stride: int,\n padding: str,\n blade_indices_kernel: List[int],\n blade_indices_bias: Union[None, List[int]] = None,\n dilations: Union[None, int] = None,\n activation=None,\n use_bias=True,\n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\",\n # kernel_regularizer=None,\n # bias_regularizer=None,\n # activity_regularizer=None,\n # kernel_constraint=None,\n # bias_constraint=None,\n **kwargs\n ):\n super().__init__(\n algebra=algebra,\n # activity_regularizer=activity_regularizer,\n **kwargs\n )\n\n self.filters = filters\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilations = dilations\n\n self.blade_indices_kernel = torch.tensor( blade_indices_kernel, dtype=torch.int64)\n if use_bias:\n self.blade_indices_bias = torch.tensor( blade_indices_bias, dtype=torch.int64)\n # self.blade_indices_kernel = blade_indices_kernel.to(dtype=torch.int64)\n # if use_bias:\n # self.blade_indices_bias = blade_indices_bias.to(dtype=torch.int64)\n\n self.activation_fn = activations.get(activation)\n self.use_bias = use_bias\n # self.kernel_initializer = initializers.get(kernel_initializer)\n # self.bias_initializer = initializers.get(bias_initializer)\n # self.kernel_regularizer = regularizers.get(kernel_regularizer)\n # self.bias_regularizer = regularizers.get(bias_regularizer)\n # self.kernel_constraint = constraints.get(kernel_constraint)\n # self.bias_constraint = constraints.get(bias_constraint)\n self.built = False\n\n def build(self, input_shape: torch.Size):\n # I: [..., S, C, B]\n self.num_input_filters = input_shape[-2]\n\n # K: [K, IC, OC, B]\n shape_kernel = [\n self.kernel_size,\n self.num_input_filters,\n self.filters,\n self.blade_indices_kernel.shape[0]\n ]\n self.kernel = nn.Parameter(1./np.prod(shape_kernel)*torch.randn(size=shape_kernel)).to(dtype=torch.float)\n if self.use_bias:\n shape_bias = [self.filters, self.blade_indices_bias.shape[0]]\n self.bias = nn.Parameter(1./np.prod(shape_bias)*torch.randn(size=shape_bias)).to(dtype=torch.float)\n else:\n self.bias = None\n\n # self.kernel = self.add_weight(\n # \"kernel\",\n # shape=shape_kernel,\n # initializer=self.kernel_initializer,\n # regularizer=self.kernel_regularizer,\n # constraint=self.kernel_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # if self.use_bias:\n # shape_bias = [self.filters, self.blade_indices_bias.shape[0]]\n # self.bias = self.add_weight(\n # \"bias\",\n # shape=shape_bias,\n # initializer=self.bias_initializer,\n # regularizer=self.bias_regularizer,\n # constraint=self.bias_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # else:\n # self.bias = None\n self.built = True\n\n def forward(self, inputs):\n if not self.built: \n self.build(inputs.shape)\n k_geom = self.algebra.from_tensor(\n self.kernel, self.blade_indices_kernel)\n\n result = self.algebra.geom_conv1d(\n inputs, k_geom,\n stride=self.stride, padding=self.padding,\n dilations=self.dilations\n )\n\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(\n self.bias, self.blade_indices_bias)\n result += b_geom\n\n if self.activation_fn:\n result = self.activation_fn(result)\n return result\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"filters\":\n self.filters,\n \"kernel_size\":\n self.kernel_size,\n \"stride\":\n self.stride,\n \"padding\":\n 
self.padding,\n \"dilations\":\n self.dilations,\n \"blade_indices_kernel\":\n self.blade_indices_kernel.numpy(),\n \"blade_indices_bias\":\n self.blade_indices_bias.numpy(),\n # \"activation\":\n # activations.serialize(self.activation),\n \"use_bias\":\n self.use_bias,\n # \"kernel_initializer\":\n # initializers.serialize(self.kernel_initializer),\n # \"bias_initializer\":\n # initializers.serialize(self.bias_initializer),\n # \"kernel_regularizer\":\n # regularizers.serialize(self.kernel_regularizer),\n # \"bias_regularizer\":\n # regularizers.serialize(self.bias_regularizer),\n # \"activity_regularizer\":\n # regularizers.serialize(self.activity_regularizer),\n # \"kernel_constraint\":\n # constraints.serialize(self.kernel_constraint),\n # \"bias_constraint\":\n # constraints.serialize(self.bias_constraint)\n\n })\n\n return config" }, { "identifier": "GeometricAlgebraExp", "path": "torch_ga/layers.py", "snippet": "class GeometricAlgebraExp(GeometricAlgebraLayer):\n \"\"\"Calculates the exponential function of the input. Input must square to\n a scalar.\n\n Args:\n algebra: GeometricAlgebra instance to use\n square_scalar_tolerance: Tolerance to use for the square scalar check\n or None if the check should be skipped\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n square_scalar_tolerance: Union[float, None] = 1e-4,\n **kwargs\n ):\n super().__init__(algebra=algebra, **kwargs)\n self.square_scalar_tolerance = square_scalar_tolerance\n self.built = False\n\n def compute_output_shape(self, input_shape):\n return torch.Size([*input_shape[:-1], self.algebra.num_blades])\n\n def build(self,inputs_shape): self.built = True\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n return self.algebra.exp(\n inputs, square_scalar_tolerance=self.square_scalar_tolerance\n )\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"square_scalar_tolerance\": self.square_scalar_tolerance\n })\n return config" }, { "identifier": "GeometricToTensor", "path": "torch_ga/layers.py", "snippet": "class GeometricToTensor(GeometricAlgebraLayer):\n \"\"\"Layer for extracting given blades from geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n blade_indices: blade indices to extract\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int],\n **kwargs):\n super().__init__(algebra=algebra, **kwargs)\n self.blade_indices = torch.tensor(blade_indices).to(dtype=torch.int64)\n # self.blade_indices = blade_indices.to(dtype=torch.int64) \n self.built = False\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-1], self.blade_indices.shape[0]]\n def build(self,input_shape): self.built = True\n\n def forward(self, inputs):\n if not self.build: self.build(inputs.shape)\n # return torch.select(inputs, self.blade_indices, axis=-1)\n x = inputs[...,self.blade_indices]\n return x\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices\": self.blade_indices.numpy()\n })\n return config" }, { "identifier": "GeometricToTensorWithKind", "path": "torch_ga/layers.py", "snippet": "class GeometricToTensorWithKind(GeometricToTensor):\n \"\"\"Layer for extracting blades of a kind from geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n kind: blade indices of kind to extract\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, kind: BladeKind,\n **kwargs):\n blade_indices = algebra.get_kind_blade_indices(kind)\n 
super().__init__(algebra=algebra, blade_indices=blade_indices,\n **kwargs)" }, { "identifier": "TensorToGeometric", "path": "torch_ga/layers.py", "snippet": "class TensorToGeometric(GeometricAlgebraLayer):\n \"\"\"Layer for converting tensors with given blade indices to\n geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n blade_indices: blade indices to interpret the last axis of the\n input tensor as\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int],\n **kwargs):\n super().__init__(algebra=algebra, **kwargs)\n\n self.blade_indices = torch.tensor(blade_indices, dtype=torch.int64)\n # self.blade_indices = blade_indices.to(dtype=torch.int64) \n self.built = False\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-1], self.algebra.num_blades]\n\n def forward(self, inputs):\n if not self.build: self.build(inputs.shape)\n return self.algebra.from_tensor(inputs, blade_indices=self.blade_indices)\n def build(self,input_shape): self.built = True\n def get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices\": self.blade_indices.numpy()\n })\n return config" }, { "identifier": "TensorWithKindToGeometric", "path": "torch_ga/layers.py", "snippet": "class TensorWithKindToGeometric(GeometricAlgebraLayer):\n \"\"\"Layer for converting tensors with given blade kind to\n geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n kind: blade kind indices to interpret the last axis of the\n input tensor as\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, kind: BladeKind,\n **kwargs):\n super().__init__(algebra=algebra, **kwargs)\n self.kind = kind\n self.built = False\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-1], self.algebra.get_kind_blade_indices(self.kind).shape[0]]\n\n def build(self,input_shape): self.built = True\n def forward(self, inputs):\n if not self.build: self.build(inputs.shape)\n\n return self.algebra.from_tensor_with_kind(inputs, kind=self.kind)\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"kind\": self.kind\n })\n return config" }, { "identifier": "BladeKind", "path": "torch_ga/blades.py", "snippet": "class BladeKind(Enum):\n \"\"\"Kind of blade depending on its degree.\"\"\"\n MV = \"mv\"\n EVEN = \"even\"\n ODD = \"odd\"\n SCALAR = \"scalar\"\n VECTOR = \"vector\"\n BIVECTOR = \"bivector\"\n TRIVECTOR = \"trivector\"\n PSEUDOSCALAR = \"pseudoscalar\"\n PSEUDOVECTOR = \"pseudovector\"\n PSEUDOBIVECTOR = \"pseudobivector\"\n PSEUDOTRIVECTOR = \"pseudotrivector\"" }, { "identifier": "GeometricAlgebra", "path": "torch_ga/torch_ga.py", "snippet": "class GeometricAlgebra:\n \"\"\"Class used for performing geometric algebra operations on `torch.Tensor` instances.\n Exposes methods for operating on `torch.Tensor` instances where their last\n axis is interpreted as blades of the algebra.\n Holds the metric and other quantities derived from it.\n \"\"\"\n\n def __init__(self, metric: List[float]):\n \"\"\"Creates a GeometricAlgebra object given a metric.\n The algebra will have as many basis vectors as there are\n elements in the metric.\n\n Args:\n metric: Metric as a list. 
Specifies what basis vectors square to\n \"\"\"\n self._metric = torch.tensor(metric, dtype=torch.float32)\n\n self._num_bases = len(metric)\n self._bases = list(map(str, range(self._num_bases)))\n\n self._blades, self._blade_degrees = blades_from_bases(self._bases)\n self._blade_degrees = torch.tensor(self._blade_degrees)\n self._num_blades = len(self._blades)\n self._max_degree = self._blade_degrees.max()\n\n # [Blades, Blades, Blades]\n _list = get_cayley_tensor(self.metric, self._bases, self._blades)\n # print(_list)\n if type(_list) in [list,tuple]:\n _list = np.array(_list)\n self._cayley, self._cayley_inner, self._cayley_outer = torch.tensor(\n _list,\n dtype=torch.float32\n )\n\n self._blade_mvs = torch.eye(self._num_blades)\n self._basis_mvs = self._blade_mvs[1:1+self._num_bases]\n\n # Find the dual by looking at the anti-diagonal in the Cayley tensor.\n self._dual_blade_indices = []\n self._dual_blade_signs = []\n\n for blade_index in range(self._num_blades):\n dual_index = self.num_blades - blade_index - 1\n anti_diag = self._cayley[blade_index, dual_index]\n # dual_sign = tf.gather(anti_diag, tf.where(\n # anti_diag != 0.0)[..., 0])[..., 0]\n dual_sign = anti_diag[torch.where(anti_diag != 0.0)]\n\n self._dual_blade_indices.append(dual_index)\n self._dual_blade_signs.append(dual_sign)\n\n self._dual_blade_indices = torch.tensor(\n self._dual_blade_indices, dtype=torch.int64)\n self._dual_blade_signs = torch.tensor(\n self._dual_blade_signs, dtype=torch.float32)\n\n def print(self, *args, **kwargs):\n \"\"\"Same as the default `print` function but formats `torch.Tensor`\n instances that have as many elements on their last axis\n as the algebra has blades using `mv_repr()`.\n \"\"\"\n def _is_mv(arg):\n return isinstance(arg, torch.Tensor) and len(arg.shape) > 0 and arg.shape[-1] == self.num_blades\n new_args = [self.mv_repr(arg) if _is_mv(arg) else arg for arg in args]\n\n print(*new_args, **kwargs)\n\n @property\n def metric(self) -> torch.Tensor:\n \"\"\"Metric list which contains the number that each\n basis vector in the algebra squares to\n (ie. the diagonal of the metric tensor).\n \"\"\"\n return self._metric\n\n @property\n def cayley(self) -> torch.Tensor:\n \"\"\"`MxMxM` tensor where `M` is the number of basis\n blades in the algebra. Used for calculating the\n geometric product:\n\n `a_i, b_j, cayley_ijk -> c_k`\n \"\"\"\n return self._cayley\n\n @property\n def cayley_inner(self) -> torch.Tensor:\n \"\"\"Analagous to cayley but for inner product.\"\"\"\n return self._cayley_inner\n\n @property\n def cayley_outer(self) -> torch.Tensor:\n \"\"\"Analagous to cayley but for outer product.\"\"\"\n return self._cayley_outer\n\n @property\n def blades(self) -> List[str]:\n \"\"\"List of all blade names.\n\n Blades are all possible independent combinations of\n basis vectors. Basis vectors are named starting\n from `\"0\"` and counting up. 
The scalar blade is the\n empty string `\"\"`.\n\n Example\n - Bases: `[\"0\", \"1\", \"2\"]`\n - Blades: `[\"\", \"0\", \"1\", \"2\", \"01\", \"02\", \"12\", \"012\"]`\n \"\"\"\n return self._blades\n\n @property\n def blade_mvs(self) -> torch.Tensor:\n \"\"\"List of all blade tensors in the algebra.\"\"\"\n return self._blade_mvs\n\n @property\n def dual_blade_indices(self) -> torch.Tensor:\n \"\"\"Indices of the dual blades for each blade.\"\"\"\n return self._dual_blade_indices\n\n @property\n def dual_blade_signs(self) -> torch.Tensor:\n \"\"\"Signs of the dual blades for each blade.\"\"\"\n return self._dual_blade_signs\n\n @property\n def num_blades(self) -> int:\n \"\"\"Total number of blades in the algebra.\"\"\"\n return self._num_blades\n\n @property\n def blade_degrees(self) -> torch.Tensor:\n \"\"\"List of blade-degree for each blade in the algebra.\"\"\"\n return self._blade_degrees\n\n @property\n def max_degree(self) -> int:\n \"\"\"Highest blade degree in the algebra.\"\"\"\n return self._max_degree\n\n @property\n def basis_mvs(self) -> torch.Tensor:\n \"\"\"List of basis vectors as torch.Tensor.\"\"\"\n return self._basis_mvs\n\n def get_kind_blade_indices(self, kind: BladeKind, invert: bool = False) -> torch.Tensor:\n \"\"\"Find all indices of blades of a given kind in the algebra.\n\n Args:\n kind: kind of blade to give indices for\n invert: whether to return all blades not of the kind\n\n Returns:\n indices of blades of a given kind in the algebra\n \"\"\"\n return get_blade_of_kind_indices(self.blade_degrees, kind, self.max_degree, invert=invert)\n\n def get_blade_indices_of_degree(self, degree: int) -> torch.Tensor:\n \"\"\"Find all indices of blades of the given degree.\n\n Args:\n degree: degree to return blades for\n\n Returns:\n indices of blades with the given degree in the algebra\n \"\"\"\n # return tf.gather(tf.range(self.num_blades), tf.where(self.blade_degrees == degree)[..., 0])\n return torch.range(self.num_blades)[torch.where(self.blade_degrees == degree)[..., 0]]\n\n def is_pure(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> bool:\n \"\"\"Returns whether the given tensor is purely of the given blades\n and has no non-zero values for blades not in the given blades.\n\n Args:\n tensor: tensor to check purity for\n blade_indices: blade indices to check purity for\n\n Returns:\n Whether the tensor is purely of the given blades\n and has no non-zero values for blades not in the given blades\n \"\"\"\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = tensor.to(dtype=torch.float32)\n if not type(blade_indices) in [torch.Tensor]:\n blade_indices = torch.tensor(blade_indices)\n \n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # blade_indices = torch.tensor(\n # blade_indices, dtype=torch.int64)\n\n inverted_blade_indices = invert_blade_indices(\n self.num_blades, blade_indices)\n\n # return tf.reduce_all(tf.gather(\n # tensor,\n # inverted_blade_indices,\n # axis=-1\n # ) == 0)\n return (tensor[inverted_blade_indices]==0).sum(dim=-1)\n\n def is_pure_kind(self, tensor: torch.Tensor, kind: BladeKind) -> bool:\n \"\"\"Returns whether the given tensor is purely of a given kind\n and has no non-zero values for blades not of the kind.\n\n Args:\n tensor: tensor to check purity for\n kind: kind of blade to check purity for\n\n Returns:\n Whether the tensor is purely of a given kind\n and has no non-zero values for blades not of the kind\n \"\"\"\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = 
tensor.to(dtype=torch.float32)\n inverted_kind_indices = self.get_kind_blade_indices(kind, invert=True)\n # print(f\"tensor={tensor}\")\n # print(f\"kind={kind}\")\n # print(f\"inverted_kind_indices={inverted_kind_indices.T}\")\n # print(f\"inverted_kind_indices.shape={inverted_kind_indices.shape}\")\n # print(f\"tensor[inverted_kind_indices]={tensor[inverted_kind_indices].T}\")\n # print(f\"tensor[inverted_kind_indices].shape={tensor[inverted_kind_indices].shape}\")\n # print(f\"tensor[inverted_kind_indices]==0={tensor[inverted_kind_indices].T==0}\")\n\n # return tf.reduce_all(tf.gather(\n # tensor,\n # inverted_kind_indices,\n # axis=-1\n # ) == 0)\n return (tensor[inverted_kind_indices]==0).sum(dim=-1)\n\n # def from_tensor(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> torch.Tensor:\n # \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and blade\n # indices. The blade indices have to align with the last axis of the\n # tensor.\n\n # Args:\n # tensor: torch.Tensor to take as values for the geometric algebra tensor\n # blade_indices: Blade indices corresponding to the tensor. Can\n # be obtained from blade names eg. using get_kind_blade_indices()\n # or as indices from the blades list property.\n\n # Returns:\n # Geometric algebra torch.Tensor from tensor and blade indices\n # \"\"\"\n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n # # print(f\"blade_indices={blade_indices}\")\n # # print(f\"tensor={tensor}\")\n \n # _shape = tensor.shape\n # is_scalar = False\n # if len(_shape)==1 :\n # _shape_final = [1]+ [self.num_blades] \n # is_scalar = True\n # else:\n # _shape_final = list(_shape[:-1]) + [self.num_blades] \n # b = torch.zeros(_shape_final)\n \n\n # # i = blade_indices.view([-1,1])\n # # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n # v = tensor.flatten().unsqueeze(1)\n # b = b.view([-1,self.num_blades])\n # # b[:,i] = v\n # try:\n # b[:,i] = v\n # except:\n # print(f\"_shape={_shape},_shape_final={_shape_final}\")\n # print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n # print(f\"i={i},v={v},b={b}\")\n # raise\n # # raise \"whatever\"\n # b = b.reshape(_shape_final)\n\n # # _shape_tmp = list(v.shape) + [self.num_blades] \n # # print(f\"i,v,_shape_tmp,_shape_final={i},{v},{_shape_tmp},{_shape_final},i.shape={i.shape}\")\n # # b = torch.sparse_coo_tensor(i, v, size=_shape_tmp)\n # # print(f\"b={b}\")\n # # b = torch.sparse_coo_tensor(i, v, size=_shape_tmp).to_dense()\n # # b = b.reshape(_shape_final)\n # if is_scalar:\n # b=b.unsqueeze(0)\n # return b\n\n # # # Put last axis on first axis so scatter_nd becomes easier.\n # # # Later undo the transposition again.\n # # # t = tf.concat([[tensor.shape.ndims - 1],\n # # # tf.range(0, tensor.shape.ndims - 1)], axis=0)\n # # # t_inv = tf.concat([tf.range(1, tensor.shape.ndims), [0]], axis=0)\n\n # # # tensor = tf.transpose(tensor, t)\n\n # # # shape = tf.concat([\n # # # torch.tensor([self.num_blades], dtype=torch.int64),\n # # # tf.shape(tensor, torch.int64)[1:]\n # # # ], axis=0)\n\n # # # tensor = tf.scatter_nd(\n # # # tf.expand_dims(blade_indices, axis=-1),\n # # # tensor,\n # # # shape\n # # # )\n\n # # # return tf.transpose(tensor, t_inv)\n # # # t = torch.concat([torch.tensor([len(tensor.shape) - 1]), torch.range(0, len(tensor.shape)- 1)], axis=0)\n # # # t_inv = torch.concat([torch.range(1, len(tensor.shape)), torch.tensor([0])], axis=0)\n # # t = 
[len(tensor.shape) - 1] + list(range(0, len(tensor.shape)- 1))\n # # t_inv = list(range(1, len(tensor.shape))) + [0]\n\n # # tensor = torch.permute(tensor, t)\n\n # # a= torch.tensor([self.num_blades], dtype=torch.int64)\n # # b = torch.tensor(tensor, dtype=torch.int64)[1:]\n # # print(\"a,b:\", a,b, tensor)\n\n\n # # shape = torch.concat([\n # # torch.tensor([self.num_blades], dtype=torch.int64),\n # # torch.tensor(tensor, dtype=torch.int64)[1:]\n # # ], axis=0)\n\n\n # # # tensor = torch.scatter_nd(\n # # # blade_indices.unsqueeze(-1),\n # # # tensor,\n # # # shape\n # # # )\n # # a = torch.zeros(shape)\n # # a[blade_indices] = tensor\n # # tensor = a\n\n # # return torch.permute(tensor, t_inv) \n \n\n def from_tensor(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and blade\n indices. The blade indices have to align with the last axis of the\n tensor.\n\n Args:\n tensor: torch.Tensor to take as values for the geometric algebra tensor\n blade_indices: Blade indices corresponding to the tensor. Can\n be obtained from blade names eg. using get_kind_blade_indices()\n or as indices from the blades list property.\n\n Returns:\n Geometric algebra torch.Tensor from tensor and blade indices\n \"\"\"\n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n blade_indices = blade_indices.to(dtype=torch.int64)\n tensor = tensor.to(dtype=torch.float32)\n # print(f\"blade_indices={blade_indices}\")\n # print(f\"tensor={tensor}\")\n \n _shape = tensor.shape\n is_scalar = False\n if len(_shape)==1 :\n _shape_final = [1]+ [self.num_blades] \n is_scalar = True\n else:\n _shape_final = list(_shape[:-1]) + [self.num_blades] \n b = torch.zeros(_shape_final)\n\n if False:\n print(f\"blade_indices.shape={blade_indices.shape}\")\n print(f\"tensor.shape={tensor.shape}\")\n print(f\"_shape_final={_shape_final}\")\n \n\n\n # i = blade_indices.view([-1,1])\n # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n i = blade_indices.flatten()\n # v = tensor.flatten().unsqueeze(1)\n v = tensor.view([-1,_shape[-1]])\n b = b.view([-1,self.num_blades])\n if False:\n print(f\"_shape={_shape},_shape_final={_shape_final}\")\n print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n print(f\"i={i},v={v},b={b}\")\n\n # b[:,i] = v\n try:\n b[:,i] = v\n except:\n print(f\"_shape={_shape},_shape_final={_shape_final}\")\n print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n print(f\"i={i},v={v},b={b}\")\n raise\n b = b.reshape(_shape_final)\n\n if False:\n print(f\"b.shape={b.shape}\")\n\n if is_scalar:\n # b=b.unsqueeze(0)\n b=b.squeeze(0)\n return b\n\n\n # # i = blade_indices.view([-1,1])\n # # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n # v = tensor.flatten().unsqueeze(1)\n # b = b.view([-1,self.num_blades])\n # # b[:,i] = v\n # try:\n # b[:,i] = v\n # except:\n # print(f\"_shape={_shape},_shape_final={_shape_final}\")\n # print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n # print(f\"i={i},v={v},b={b}\")\n # raise\n # b = b.reshape(_shape_final)\n\n # if is_scalar:\n # b=b.unsqueeze(0)\n # return b\n\n \n\n def from_tensor_with_kind(self, tensor: torch.Tensor, kind: BladeKind) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and a kind.\n The kind's blade indices have to align with the last axis of the\n 
tensor.\n\n Args:\n tensor: torch.Tensor to take as values for the geometric algebra tensor\n kind: Kind corresponding to the tensor\n\n Returns:\n Geometric algebra torch.Tensor from tensor and kind\n \"\"\"\n # Put last axis on first axis so scatter_nd becomes easier.\n # Later undo the transposition again.\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = tensor.to(dtype=torch.float32)\n kind_indices = self.get_kind_blade_indices(kind)\n if False:\n print(f\"tensor={tensor}\")\n print(f\"kind_indices={kind_indices}\")\n return self.from_tensor(tensor, kind_indices)\n\n def from_scalar(self, scalar: numbers.Number) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor with scalar elements.\n\n Args:\n scalar: Elements to be used as scalars\n\n Returns:\n Geometric algebra torch.Tensor from scalars\n \"\"\"\n # return self.from_tensor_with_kind(tf.expand_dims(scalar, axis=-1), BladeKind.SCALAR)\n # print(\"torch.tensor([scalar]).unsqueeze(-1).shape\",torch.tensor([scalar]).unsqueeze(-1).shape)\n return self.from_tensor_with_kind(torch.tensor([scalar]).unsqueeze(-1), BladeKind.SCALAR).squeeze(0)\n\n def e(self, *blades: List[str]) -> torch.Tensor:\n \"\"\"Returns a geometric algebra torch.Tensor with the given blades set\n to 1.\n\n Args:\n blades: list of blade names, can be unnormalized\n\n Returns:\n torch.Tensor with blades set to 1\n \"\"\"\n blade_signs, blade_indices = get_blade_indices_from_names(\n blades, self.blades)\n\n assert type(blade_indices) in [torch.Tensor], \"should be a tensor\"\n if False: blade_indices = torch.tensor(blade_indices)\n\n # # Don't allow duplicate indices\n # tf.Assert(\n # blade_indices.shape[0] == tf.unique(blade_indices)[0].shape[0],\n # [blades]\n # )\n\n # x = (\n # tf.expand_dims(blade_signs, axis=-1) *\n # tf.gather(self.blade_mvs, blade_indices)\n # )\n\n # # a, b -> b\n # return tf.reduce_sum(x, axis=-2)\n\n # print(f\"blade_indices={blade_indices}\")\n # print(f\"torch.unique(blade_indices)={torch.unique(blade_indices)}\")\n # print(f\"torch.unique(blade_indices)[0]={torch.unique(blade_indices)[0]}\")\n # Don't allow duplicate indices\n # assert(\n # blade_indices.shape[0] == torch.unique(blade_indices).shape[0],\n # [blades]\n # )\n assert blade_indices.shape[0] == torch.unique(blade_indices).shape[0], \"indexes not unique\"\n\n x = blade_signs.unsqueeze(-1) * self.blade_mvs[blade_indices]\n\n # a, b -> b\n return x.sum(dim=-2) \n\n def __getattr__(self, name: str) -> torch.Tensor:\n \"\"\"Returns basis blade tensors if name was a basis.\"\"\"\n if name.startswith(\"e\") and (name[1:] == \"\" or int(name[1:]) >= 0):\n return self.e(name[1:])\n raise AttributeError\n\n def dual(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the dual of the geometric algebra tensor.\n\n Args:\n tensor: Geometric algebra tensor to return dual for\n\n Returns:\n Dual of the geometric algebra tensor\n \"\"\"\n tensor = torch.tensor(tensor, dtype=torch.float32)\n # return self.dual_blade_signs * tf.gather(tensor, self.dual_blade_indices, axis=-1)\n return self.dual_blade_signs * tensor[...,self.dual_blade_indices]\n\n def grade_automorphism(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the geometric algebra tensor with odd grades negated.\n See https://en.wikipedia.org/wiki/Paravector#Grade_automorphism.\n\n Args:\n tensor: Geometric algebra tensor to return grade automorphism for\n\n Returns:\n Geometric algebra tensor with odd grades negated\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n return 
mv_grade_automorphism(tensor, self.blade_degrees)\n\n def reversion(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the grade-reversed geometric algebra tensor.\n See https://en.wikipedia.org/wiki/Paravector#Reversion_conjugation.\n\n Args:\n tensor: Geometric algebra tensor to return grade-reversion for\n\n Returns:\n Grade-reversed geometric algebra tensor\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n\n return mv_reversion(tensor, self.blade_degrees)\n\n def conjugation(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Combines reversion and grade automorphism.\n See https://en.wikipedia.org/wiki/Paravector#Clifford_conjugation.\n\n Args:\n tensor: Geometric algebra tensor to return conjugate for\n\n Returns:\n Geometric algebra tensor after `reversion()` and `grade_automorphism()`\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n return self.grade_automorphism(self.reversion(tensor))\n\n def simple_inverse(self, a: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inverted geometric algebra tensor\n `X^-1` such that `X * X^-1 = 1`. Only works for elements that\n square to scalars. Faster than the general inverse.\n\n Args:\n a: Geometric algebra tensor to return inverse for\n\n Returns:\n inverted geometric algebra tensor\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n rev_a = self.reversion(a)\n divisor = self.geom_prod(a, rev_a)\n # print(f\"divisor={divisor}\")\n # print(f\"self.is_pure_kind(divisor, BladeKind.SCALAR)={self.is_pure_kind(divisor, BladeKind.SCALAR)}\")\n if not self.is_pure_kind(divisor, BladeKind.SCALAR):\n raise Exception(\n \"Can't invert multi-vector (inversion divisor V ~V not scalar: %s).\" % divisor)\n\n # Divide by scalar part\n return rev_a / divisor[..., :1]\n\n def reg_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the regressive product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the regressive product\n b: Geometric algebra tensor on the right hand side of\n the regressive product\n\n Returns:\n regressive product of a and b\n \"\"\"\n a = torch.tensor(a, dtype=torch.float32)\n b = torch.tensor(b, dtype=torch.float32)\n\n return self.dual(self.ext_prod(self.dual(a), self.dual(b)))\n\n def ext_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the exterior product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the exterior product\n b: Geometric algebra tensor on the right hand side of\n the exterior product\n\n Returns:\n exterior product of a and b\n \"\"\"\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n\n return mv_multiply(a, b, self._cayley_outer)\n\n def geom_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the geometric product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the geometric product\n b: Geometric algebra tensor on the right hand side of\n the geometric product\n\n Returns:\n geometric product of a and b\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n # b = torch.tensor(b, dtype=torch.float32)\n\n # a = torch.tensor(a)\n # b = torch.tensor(b)\n\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n return mv_multiply(a, b, self._cayley)\n\n \n def element_wise_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the element-wise product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric 
algebra tensor on the left hand side of\n the geometric product\n b: Geometric algebra tensor on the right hand side of\n the geometric product\n\n Returns:\n geometric product of a and b\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n # b = torch.tensor(b, dtype=torch.float32)\n\n # a = torch.tensor(a)\n # b = torch.tensor(b)\n\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n return mv_multiply_element_wise(a, b, self._cayley)\n\n\n def inner_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inner product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the inner product\n b: Geometric algebra tensor on the right hand side of\n the inner product\n\n Returns:\n inner product of a and b\n \"\"\"\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n\n return mv_multiply(a, b, self._cayley_inner)\n\n def geom_conv1d(self, a: torch.Tensor, k: torch.Tensor,\n stride: int, padding: str,\n dilations: Union[int, None] = None) -> torch.Tensor:\n \"\"\"Returns the 1D convolution of a sequence with a geometric algebra\n tensor kernel. The convolution is performed using the geometric\n product.\n\n Args:\n a: Input geometric algebra tensor of shape\n [..., Length, ChannelsIn, Blades]\n k: Geometric algebra tensor for the convolution kernel of shape\n [KernelSize, ChannelsIn, ChannelsOut, Blades]\n stride: Stride to use for the convolution\n padding: \"SAME\" (zero-pad input length so output\n length == input length / stride) or \"VALID\" (no padding)\n Returns:\n Geometric algbra tensor of shape\n [..., OutputLength, ChannelsOut, Blades]\n representing `a` convolved with `k`\n \"\"\"\n a = a.to(dtype=torch.float32)\n k = k.to(dtype=torch.float32)\n\n # return mv_conv1d(a, k, self._cayley, stride=stride, padding=padding)\n return f_mv_conv1d(a, k, self._cayley, stride=stride, padding=padding)\n\n def mv_repr(self, a: torch.Tensor) -> str:\n \"\"\"Returns a string representation for the given\n geometric algebra tensor.\n\n Args:\n a: Geometric algebra tensor to return the representation for\n\n Returns:\n string representation for `a`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n if len(a.shape) == 1:\n return \"MultiVector[%s]\" % \" + \".join(\n \"%.2f*%s\" % (value, get_blade_repr(blade_name))\n for value, blade_name\n in zip(a, self.blades)\n if value != 0\n )\n else:\n return f\"MultiVector[batch_shape={a.shape[:-1]}]\"\n\n def approx_exp(self, a: torch.Tensor, order: int = 50) -> torch.Tensor:\n \"\"\"Returns an approximation of the exponential using a centered taylor series.\n\n Args:\n a: Geometric algebra tensor to return exponential for\n order: order of the approximation\n\n Returns:\n Approximation of `exp(a)`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n v = self.from_scalar(1.0)\n result = self.from_scalar(1.0)\n for i in range(1, order + 1):\n v = self.geom_prod(a, v)\n # i_factorial = tf.exp(tf.math.lgamma(i + 1.0))\n i_factorial = torch.exp(torch.lgamma(torch.tensor([i + 1.0])))\n result += v / i_factorial\n return result\n\n def exp(self, a: torch.Tensor, square_scalar_tolerance: Union[float, None] = 1e-4) -> torch.Tensor:\n \"\"\"Returns the exponential of the passed geometric algebra tensor.\n Only works for multivectors that square to scalars.\n\n Args:\n a: Geometric algebra tensor to return exponential for\n square_scalar_tolerance: Tolerance to use for the square scalar check\n or None if the check should be skipped\n\n Returns:\n `exp(a)`\n \"\"\"\n # See 
https://www.euclideanspace.com/maths/algebra/clifford/algebra/functions/exponent/index.htm\n # for an explanation of how to exponentiate multivectors.\n\n self_sq = self.geom_prod(a, a)\n\n if square_scalar_tolerance is not None:\n # tf.Assert(tf.reduce_all(\n # tf.abs(self_sq[..., 1:]) < square_scalar_tolerance\n # ), [self_sq])\n \n # assert torch.equal(torch.all(self_sq[..., 1:].abs() < square_scalar_tolerance),[self_sq]), \"not sure what\"\n assert torch.all(self_sq[..., 1:].abs() < square_scalar_tolerance), \"square_scalar_tolerance not met\"\n\n scalar_self_sq = self_sq[..., :1]\n\n # \"Complex\" square root (argument can be negative)\n s_sqrt = torch.sign(scalar_self_sq) * torch.sqrt(torch.abs(scalar_self_sq))\n\n # Square to +1: cosh(sqrt(||a||)) + a / sqrt(||a||) sinh(sqrt(||a||))\n # Square to -1: cos(sqrt(||a||)) + a / sqrt(||a||) sin(sqrt(||a||))\n # TODO: Does this work for values other than 1 too? eg. square to +0.5?\n # TODO: Find a solution that doesnt require calculating all possibilities\n # first.\n non_zero_result = torch.where(\n scalar_self_sq < 0,\n (self.from_tensor(torch.cos(s_sqrt), torch.tensor([0])) + a / s_sqrt * torch.sin(s_sqrt)),\n (self.from_tensor(torch.cosh(s_sqrt), torch.tensor([0])) + a / s_sqrt * torch.sinh(s_sqrt))\n )\n\n return torch.where(scalar_self_sq == 0, self.from_scalar(1.0) + a, non_zero_result)\n\n def approx_log(self, a: torch.Tensor, order: int = 50) -> torch.Tensor:\n \"\"\"Returns an approximation of the natural logarithm using a centered\n taylor series. Only converges for multivectors where `||mv - 1|| < 1`.\n\n Args:\n a: Geometric algebra tensor to return logarithm for\n order: order of the approximation\n\n Returns:\n Approximation of `log(a)`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n result = self.from_scalar(0.0)\n\n a_minus_one = a - self.from_scalar(1.0)\n v = None\n\n for i in range(1, order + 1):\n v = a_minus_one if v is None else v * a_minus_one\n result += (((-1.0) ** i) / i) * v\n\n return -result\n\n def int_pow(self, a: torch.Tensor, n: int) -> torch.Tensor:\n \"\"\"Returns the geometric algebra tensor to the power of an integer\n using repeated multiplication.\n\n Args:\n a: Geometric algebra tensor to raise\n n: integer power to raise the multivector to\n\n Returns:\n `a` to the power of `n`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n if not isinstance(n, int):\n raise Exception(\"n must be an integer.\")\n if n < 0:\n raise Exception(\"Can't raise to negative powers.\")\n\n if n == 0:\n # TODO: more efficient (ones only in scalar)\n return torch.ones_like(a) * self.e(\"\")\n\n result = a\n for i in range(n - 1):\n result = self.geom_prod(result, a)\n return result\n\n def keep_blades(self, a: torch.Tensor, blade_indices: List[int]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns it with only the given\n blade_indices as non-zeros.\n\n Args:\n a: Geometric algebra tensor to copy\n blade_indices: Indices for blades to keep\n\n Returns:\n `a` with only `blade_indices` components as non-zeros\n \"\"\"\n a = a.to(dtype=torch.float32)\n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # blade_values = tf.gather(a, blade_indices, axis=-1)\n blade_values = a[...,blade_indices]\n if True: \n b = self.from_tensor(blade_values, blade_indices)\n else:\n blade_mask = torch.zeros(self.num_blades)\n blade_mask[blade_indices] = 1\n b = self.from_tensor(blade_values, blade_mask)\n # print(f\"blade_values, blade_indices, b={blade_values}, {blade_indices}, {b}\")\n # 
print(f\"blade_mask={blade_mask}\")\n return b\n\n # return self.from_tensor(blade_values, blade_indices)\n\n def keep_blades_with_name(self, a: torch.Tensor, blade_names: Union[List[str], str]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns it with only the given\n blades as non-zeros.\n\n Args:\n a: Geometric algebra tensor to copy\n blade_names: Blades to keep\n\n Returns:\n `a` with only `blade_names` components as non-zeros\n \"\"\"\n if isinstance(blade_names, str):\n blade_names = [blade_names]\n\n _, blade_indices = get_blade_indices_from_names(blade_names, self.blades)\n\n if False:\n print(f\"self.blades={self.blades}\")\n print(f\"blade_names={blade_names}\")\n print(f\"blade_indices={blade_indices}\")\n\n return self.keep_blades(a, blade_indices)\n\n def select_blades(self, a: torch.Tensor, blade_indices: List[int]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns a `torch.Tensor` with the\n blades in blade_indices on the last axis.\n\n\n Args:\n a: Geometric algebra tensor to copy\n blade_indices: Indices for blades to select\n\n Returns:\n `torch.Tensor` based on `a` with `blade_indices` on last axis.\n \"\"\"\n a = a.to(dtype=torch.float32) \n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # result = tf.gather(a, blade_indices, axis=-1)\n try:\n if len(a.shape)==1 or a.shape[-1]==a.size().numel():\n result = a.squeeze()[blade_indices]\n else:\n result = a[...,blade_indices]\n except:\n print(f\"a={a},blade_indices={blade_indices}\")\n print(f\"a.shape={a.shape},blade_indices.shape={blade_indices.shape},a.size().numel()={a.size().numel()}\")\n raise\n \n return result\n\n def select_blades_with_name(self, a: torch.Tensor, blade_names: Union[List[str], str]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns a `torch.Tensor` with the\n blades in blade_names on the last axis.\n\n\n Args:\n a: Geometric algebra tensor to copy\n blade_names: Blades to keep\n\n Returns:\n `torch.Tensor` based on `a` with `blade_names` on last axis.\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n is_single_blade = isinstance(blade_names, str)\n if is_single_blade:\n blade_names = [blade_names]\n\n blade_signs, blade_indices = get_blade_indices_from_names(\n blade_names, self.blades)\n\n result = blade_signs * self.select_blades(a, blade_indices)\n # if True:\n # print(f\"\")\n\n if is_single_blade:\n return result[..., 0]\n\n return result\n\n def inverse(self, a: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inverted geometric algebra tensor\n `X^-1` such that `X * X^-1 = 1`.\n\n Using Shirokov's inverse algorithm that works in arbitrary dimensions,\n see https://arxiv.org/abs/2005.04015 Theorem 4.\n\n Args:\n a: Geometric algebra tensor to return inverse for\n\n Returns:\n inverted geometric algebra tensor\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n a = a.to(dtype=torch.float32)\n if False:\n print(f\"a={a}\")\n\n n = 2 ** ((len(self.metric) + 1) // 2)\n\n # u = a.clone()\n u = a\n for k in range(1, n):\n # c = n / k * self.keep_blades_with_name(u, \"\")\n d = self.keep_blades_with_name(u, \"\")\n c = n / k * d\n u_minus_c = u - c\n if False:\n print(f\"a,d,c,u_minus_c, u = {a},{d},{c},{u_minus_c}, {u}\")\n u = self.geom_prod(a, u_minus_c)\n if False:\n print(f\"u={u}\")\n \n if False:\n print(f\"n={n}\")\n print(f\"a={a}\")\n print(f\"u={u}\")\n if not torch.all(self.is_pure_kind(u, BladeKind.SCALAR)):\n raise Exception(\n 
\"Can't invert multi-vector (det U not scalar: %s).\" % u)\n\n # adj / det\n return u_minus_c / u[..., :1]\n\n def __call__(self, a: torch.Tensor) -> MultiVector:\n \"\"\"Creates a `MultiVector` from a geometric algebra tensor.\n Mainly used as a wrapper for the algebra's functions for convenience.\n\n Args:\n a: Geometric algebra tensor to return `MultiVector` for\n\n Returns:\n `MultiVector` for `a`\n \"\"\"\n a = a.to(dtype=torch.float32)\n return MultiVector(a, self)\n # return MultiVector(torch.tensor(a), self)" } ]
import unittest as ut

import h5py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch
from io import BytesIO

from torch_ga.layers import (
    GeometricProductDense, GeometricSandwichProductDense,
    GeometricProductElementwise, GeometricSandwichProductElementwise,
    GeometricProductConv1D,
    GeometricAlgebraExp,
    GeometricToTensor, GeometricToTensorWithKind,
    TensorToGeometric, TensorWithKindToGeometric,
)
from torch_ga.blades import BladeKind
from torch_ga import GeometricAlgebra
16,872
torch.manual_seed(0)

class TestKerasLayers(ut.TestCase):
    def assertTensorsEqual(self, a, b):
        # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b))
        print(f"assertTensorsEqual(a={a},b={b})")
        assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b)

    def test_tensor_to_geometric(self):
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = torch.ones([32, 4])
        gt_geom_tensor = torch.concat(
            [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])],
            axis=-1
        )

        vector_blade_indices = [1, 2, 3, 4]

        tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices)

        self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor)

    def test_tensor_with_kind_to_geometric(self):
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = torch.ones([32, 4])
        gt_geom_tensor = torch.concat(
            [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])],
            axis=-1
        )

        vector_blade_indices = [1, 2, 3, 4]
torch.manual_seed(0)

class TestKerasLayers(ut.TestCase):
    def assertTensorsEqual(self, a, b):
        # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b))
        print(f"assertTensorsEqual(a={a},b={b})")
        assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b)

    def test_tensor_to_geometric(self):
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = torch.ones([32, 4])
        gt_geom_tensor = torch.concat(
            [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])],
            axis=-1
        )

        vector_blade_indices = [1, 2, 3, 4]

        tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices)

        self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor)

    def test_tensor_with_kind_to_geometric(self):
        sta = GeometricAlgebra([1, -1, -1, -1])
        tensor = torch.ones([32, 4])
        gt_geom_tensor = torch.concat(
            [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])],
            axis=-1
        )

        vector_blade_indices = [1, 2, 3, 4]
tensor_kind_to_geom_layer = TensorWithKindToGeometric(
9
2023-10-07 13:34:07+00:00
24k
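
The torch_ga record above ends with the algebra's `inverse` method, whose docstring points at Shirokov's arbitrary-dimension inverse (https://arxiv.org/abs/2005.04015, Theorem 4): iterate u_{k+1} = a * (u_k - (n/k) * <u_k>_0) with n = 2**((dims+1)//2), then divide by the resulting scalar. The sketch below is an illustration of that recursion only, not the torch_ga implementation: it specialises to the one-dimensional algebra Cl(0,1) (isomorphic to the complex numbers) so multivectors are plain `(scalar, e1)` pairs, and the helper names `geom_prod`, `scalar_part`, and `shirokov_inverse` are hypothetical.

```python
# Minimal sketch of the Shirokov inverse recursion described in the docstring above,
# specialised to Cl(0,1) ~ complex numbers. Illustration only; names are hypothetical.

def geom_prod(a, b):
    """Geometric product in Cl(0,1): (a0 + a1*e1)(b0 + b1*e1), with e1*e1 = -1."""
    a0, a1 = a
    b0, b1 = b
    return (a0 * b0 - a1 * b1, a0 * b1 + a1 * b0)

def scalar_part(a):
    """Keep only the grade-0 (scalar) component."""
    return (a[0], 0.0)

def shirokov_inverse(a, dims=1):
    """u_{k+1} = a * (u_k - (n/k) * <u_k>_0), n = 2**((dims+1)//2); inverse = (u_k - c) / <u_n>_0."""
    n = 2 ** ((dims + 1) // 2)
    u = a
    for k in range(1, n):
        c = tuple((n / k) * x for x in scalar_part(u))
        u_minus_c = tuple(ui - ci for ui, ci in zip(u, c))
        u = geom_prod(a, u_minus_c)
    # Mirrors the is_pure_kind(u, SCALAR) check in the record above.
    assert abs(u[1]) < 1e-9, "det U is not a scalar; multivector not invertible"
    return tuple(x / u[0] for x in u_minus_c)

# Quick check: (3 + 4*e1)^-1 multiplies back to the identity (1, 0).
a = (3.0, 4.0)
a_inv = shirokov_inverse(a)
print(geom_prod(a, a_inv))  # ~ (1.0, 0.0)
```

For this 1-D case the recursion runs a single step and reproduces the familiar complex reciprocal, conj(a) / |a|^2, which is a convenient way to sanity-check the general-dimension code in the record.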
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n \"\"\"\n Initializes the StreaksDB class and creates the 'streaks' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_streaks_table()\n\n def _create_streaks_table(self):\n \"\"\"\n Creates the 'streaks' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS streaks (\n discord_id BIGINT PRIMARY KEY,\n current_streak INT DEFAULT 0,\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :param new_streak: The new streak count.\n \"\"\"\n query = \"\"\"\n INSERT INTO streaks (discord_id, current_streak)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE current_streak = %s\n \"\"\"\n params = (discord_id, new_streak, new_streak)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :return: The current streak count.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n query = \"SELECT current_streak FROM streaks WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()" }, { "identifier": "TeamMemberDB", "path": "team_members/team_member_db.py", "snippet": "class TeamMemberDB(BaseDB):\n \"\"\"\n TeamMemberDB class handles operations related to the 'team_members' table.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the TeamMemberDB class and creates the 'team_members' table if it doesn't exist.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_team_members_table()\n\n def _create_team_members_table(self):\n \"\"\"\n Creates the 'team_members' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS team_members (\n discord_id BIGINT PRIMARY KEY,\n name VARCHAR(255) NOT NULL,\n time_zone VARCHAR(50) NOT NULL,\n github_username VARCHAR(255),\n on_vacation BOOLEAN DEFAULT FALSE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_new_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Inserts a new team member into the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param name: The name of the team member.\n :param time_zone: The time zone of the team member.\n :param github_username: The GitHub username of 
the team member.\n \"\"\"\n query = \"\"\"\n INSERT INTO team_members (discord_id, name, time_zone, github_username)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE name = %s, time_zone = %s, github_username = %s\n \"\"\"\n params = (discord_id, name, time_zone, github_username, name, time_zone, github_username)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Removes a team member from the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member to remove.\n \"\"\"\n query = \"DELETE FROM team_members WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def list_all_members(self) -> List[Tuple[int, str, str, str, bool]]:\n \"\"\"\n Fetches all team members from the 'team_members' table.\n\n :return: A list of tuples, each containing the Discord ID, name, time zone, GitHub username, and vacation status of a team member.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n try:\n c.execute(\"SELECT discord_id, name, time_zone, github_username, on_vacation FROM team_members\")\n return c.fetchall()\n finally:\n c.close()\n self.close()\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Updates the timezone of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param new_time_zone: The new timezone to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET time_zone = %s WHERE discord_id = %s\"\n params = (new_time_zone, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def set_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET on_vacation = %s WHERE discord_id = %s\"\n params = (on_vacation, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "UpdatesDB", "path": "updates/updates_db.py", "snippet": "class UpdatesDB(BaseDB):\n \"\"\"\n Database class for handling operations related to the 'updates' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the UpdatesDB class and creates the 'updates' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_updates_table()\n\n def _create_updates_table(self):\n \"\"\"\n Creates the 'updates' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS updates (\n id INT AUTO_INCREMENT PRIMARY KEY,\n discord_id BIGINT,\n status TEXT NOT NULL,\n summarized_status TEXT,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n time_zone VARCHAR(255),\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n )\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update into the 'updates' 
table.\n\n :param discord_id: The Discord ID of the team member.\n :param status: The status update.\n :param time_zone: The time zone of the user.\n \"\"\"\n # Convert current UTC time to user's local time zone\n utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)\n local_now = utc_now.astimezone(pytz.timezone(time_zone))\n\n query = \"INSERT INTO updates (discord_id, status, timestamp, time_zone) VALUES (%s, %s, %s, %s)\"\n params = (discord_id, status, local_now, time_zone)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized_status for the most recent update for a given user.\n\n :param discord_id: The Discord ID of the team member.\n :param summarized_status: The summarized status update.\n \"\"\"\n query = \"\"\"\n UPDATE updates\n SET summarized_status = %s\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (summarized_status, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n \n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n :param discord_id: The Discord ID of the user.\n :param time_zone: The time zone of the user.\n :return: The count of check-ins in the current week.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Adjusting the current time to the user's time zone\n local_tz = pytz.timezone(time_zone)\n local_now = datetime.now(local_tz)\n \n # Getting the Monday of the current week in the user's time zone\n monday = local_now - timedelta(days=local_now.weekday())\n monday = monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n query = \"\"\"\n SELECT COUNT(*) FROM updates\n WHERE discord_id = %s AND timestamp >= %s\n \"\"\"\n params = (discord_id, monday)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()\n\n def get_statuses_in_date_range(self, discord_id: int, start_date: datetime, end_date: datetime) -> List[str]:\n \"\"\"\n Fetches all raw status updates for a given user within a specified date range.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n A list of raw status updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT summarized_status FROM updates\n WHERE discord_id = %s AND timestamp >= %s AND timestamp <= %s\n \"\"\"\n params = (discord_id, start_date, end_date)\n try:\n c.execute(query, params)\n \n statuses = [row[0] for row in c.fetchall()]\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor(dictionary=True) # Set dictionary=True to return results as dictionaries\n \n query = \"\"\"\n SELECT id, discord_id, status, summarized_status, timestamp \n FROM updates\n 
WHERE discord_id = %s\n ORDER BY timestamp DESC\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n statuses = c.fetchall()\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT timestamp, time_zone FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return (row[0], row[1]) if row else (None, None)\n finally:\n c.close()\n self.close()\n \n def delete_newest_status(self, discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Fetch the ID of the newest status update for the given user\n query_get_id = \"\"\"\n SELECT id FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n try:\n c.execute(query_get_id, (discord_id,))\n \n row = c.fetchone()\n if row:\n status_id = row[0]\n \n # Now, delete the status update using its ID\n query_delete = \"\"\"\n DELETE FROM updates WHERE id = %s\n \"\"\"\n c.execute(query_delete, (status_id,))\n \n self.conn.commit()\n finally:\n c.close()\n self.close()" }, { "identifier": "WeeklyPostsDB", "path": "weekly_posts/weekly_posts_db.py", "snippet": "class WeeklyPostsDB(BaseDB):\n \"\"\"\n Database class that handles operations related to the 'weekly_posts' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the WeeklyPostsDB class, connects to the MySQL database,\n and creates the 'weekly_posts' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_weekly_posts_table()\n\n def _create_weekly_posts_table(self):\n \"\"\"\n Creates the 'weekly_posts' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS weekly_posts (\n post_id BIGINT PRIMARY KEY,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def get_weekly_post_data(self) -> Optional[Dict[str, datetime.datetime]]:\n \"\"\"\n Fetches the most recent weekly post data from the 'weekly_posts' table.\n\n :return: A dictionary containing the post ID and timestamp, or None if no data exists.\n \"\"\"\n query = \"SELECT post_id, timestamp FROM weekly_posts ORDER BY timestamp DESC LIMIT 1\"\n \n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n try:\n c.execute(query)\n row = c.fetchone()\n\n if row:\n return {'post_id': row[0], 'timestamp': row[1]}\n return None\n finally:\n c.close()\n self.close()\n\n def save_weekly_post_data(self, post_id: int, timestamp: 
datetime.datetime):\n \"\"\"\n Inserts or updates the weekly post data in the 'weekly_posts' table.\n\n :param post_id: The ID of the weekly post.\n :param timestamp: The timestamp of the weekly post.\n \"\"\"\n query = \"\"\"\n INSERT INTO weekly_posts (post_id, timestamp)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE timestamp = %s\n \"\"\"\n params = (post_id, timestamp, timestamp)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "StreaksManager", "path": "streaks/streaks_manager.py", "snippet": "class StreaksManager:\n \"\"\"\n Manages the streaks for team members.\n \"\"\"\n \n def __init__(self, streaks_db: StreaksDB):\n \"\"\"\n Initializes a new StreaksManager instance.\n\n Args:\n streaks_db: The StreaksDB object that handles database operations.\n \"\"\"\n self.streaks_db = streaks_db\n \n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n The current streak count.\n \"\"\"\n return self.streaks_db.get_streak(discord_id)\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n new_streak: The new streak count.\n \"\"\"\n self.streaks_db.update_streak(discord_id, new_streak)\n \n def reset_streak(self, discord_id: int):\n \"\"\"\n Resets the streak for a given user to zero.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.streaks_db.update_streak(discord_id, 0)" }, { "identifier": "TeamMemberManager", "path": "team_members/team_member_manager.py", "snippet": "class TeamMemberManager:\n \"\"\"\n Manages operations related to team members.\n \"\"\"\n\n def __init__(self, db: TeamMemberDB):\n \"\"\"\n Initialize a TeamMemberManager object.\n\n :param db: TeamMemberDB object for interacting with the database.\n \"\"\"\n self.db = db\n self.team_members = self.load_team_members()\n\n def load_team_members(self) -> List[TeamMember]:\n \"\"\"\n Load team members from the MySQL database into a list of TeamMember objects.\n\n :return: List of TeamMember objects.\n \"\"\"\n team_members = []\n members_data = self.db.list_all_members()\n\n for member_data in members_data:\n member = TeamMember(\n discord_id=member_data[0],\n time_zone=member_data[2],\n name=member_data[1],\n github_username=member_data[3],\n on_vacation=member_data[4]\n )\n team_members.append(member)\n\n return team_members\n\n def find_member(self, discord_id: int) -> TeamMember:\n \"\"\"\n Find and return a team member by their Discord ID.\n\n :param discord_id: The Discord ID of the team member.\n :return: A TeamMember object if found, otherwise None.\n \"\"\"\n for member in self.team_members:\n if member.discord_id == discord_id:\n return member\n return None\n\n def add_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Add a new team member to the list and the database.\n\n :param discord_id: The Discord ID of the new member.\n :param name: The name of the new member.\n :param time_zone: The time zone of the new member.\n :param github_username: The GitHub username of the new member.\n \"\"\"\n new_member = TeamMember(discord_id, time_zone, name, github_username)\n self.db.insert_new_member(discord_id, name, time_zone, github_username)\n self.team_members.append(new_member)\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Remove a team member from the list and the database.\n\n :param 
discord_id: The Discord ID of the member to remove.\n \"\"\"\n self.db.remove_member(discord_id)\n self.team_members = [member for member in self.team_members if member.discord_id != discord_id]\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Update the timezone of a team member in the database and the list.\n\n :param discord_id: The Discord ID of the member to update.\n :param new_time_zone: The new timezone string to set for the member.\n \"\"\"\n # Update the timezone in the database\n self.db.update_member_timezone(discord_id, new_time_zone)\n\n # Find the member in the team_members list and update their timezone\n member = self.find_member(discord_id)\n if member:\n member.time_zone = new_time_zone\n\n def set_member_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n # Update the vacation status in the database\n self.db.set_vacation_status(discord_id, on_vacation)\n\n # Find the member in the team_members list and update their vacation status\n member = self.find_member(discord_id)\n if member:\n member.on_vacation = on_vacation" }, { "identifier": "UpdatesManager", "path": "updates/updates_manager.py", "snippet": "class UpdatesManager:\n \"\"\"\n Manages status updates for team members.\n \"\"\"\n\n def __init__(self, updates_db: UpdatesDB):\n \"\"\"\n Initializes a new UpdatesManager instance.\n\n Args:\n updates_db: The UpdatesDB object that handles database operations.\n \"\"\"\n self.updates_db = updates_db\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update.\n\n Args:\n discord_id: The Discord ID of the team member.\n status: The status update.\n \"\"\"\n self.updates_db.insert_status(discord_id, status, time_zone)\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized status for the most recent update for a given user.\n\n Args:\n discord_id: The Discord ID of the team member.\n summarized_status: The summarized status update.\n \"\"\"\n self.updates_db.update_summarized_status(discord_id, summarized_status)\n\n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n Args:\n discord_id: The Discord ID of the user.\n time_zone: The time zone of the user.\n\n Returns:\n The count of check-ins in the current week.\n \"\"\"\n return self.updates_db.get_weekly_checkins_count(discord_id, time_zone)\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n return self.updates_db.get_all_statuses_for_user(discord_id)\n\n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n return self.updates_db.get_last_update_timestamp(discord_id)\n\n def delete_newest_status(self, 
discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.updates_db.delete_newest_status(discord_id)\n\n async def generate_daily_summary(self, user_message: str) -> str:\n \"\"\"\n Generates a daily summary of the user's message using a large language model.\n\n Args:\n user_message: The user's message that needs to be summarized.\n\n Returns:\n The summarized message.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Please summarize the user's update into two sections: 'Did' for tasks completed yesterday and 'Do' for tasks planned for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n \n except Exception as e:\n print(f\"An error occurred while generating the summary: {e}\")\n return \"Error in generating summary\"\n\n async def generate_weekly_summary(self, discord_id: int, start_date: datetime, end_date: datetime) -> str:\n \"\"\"\n Generates a weekly summary of the user's status updates using a large language model.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n The summarized weekly status update.\n \"\"\"\n # Fetch all raw status updates for the specified date range using the new method in UpdatesDB\n weekly_statuses = self.updates_db.get_statuses_in_date_range(discord_id, start_date, end_date)\n\n if not weekly_statuses:\n return \"There are no status updates for this week.\"\n \n # Combine all raw statuses into a single string\n combined_statuses = \"\\n\".join(weekly_statuses)\n \n # Prepare a system message to guide OpenAI's model for weekly summary\n system_message = \"Please generate a comprehensive weekly summary based on the provided daily status updates, including only tasks that have been accomplished. 
Ignore tasks that are not in the 'Did' section.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_statuses}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-4-0613\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n weekly_summary = response['choices'][0]['message']['content'].strip()\n\n return weekly_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the weekly summary: {e}\")\n return \"Error in generating weekly summary\"\n \n async def summarize_technical_updates(self, commit_messages: List[str]) -> str:\n \"\"\"\n Summarizes the technical updates based on commit messages.\n\n Args:\n commit_messages: List of commit messages for the day.\n\n Returns:\n A summarized version of the technical updates.\n \"\"\"\n\n # Combine commit messages into a single string for the LLM\n combined_commits = \"\\n\".join(commit_messages)\n\n # If there are no commit messages, return a default message\n if not combined_commits:\n return \"No technical updates found based on commit messages.\"\n\n # Summarization using LLM\n system_message = \"Please provide a concise summary of the technical updates based on the provided commit messages.\"\n\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_commits}\n ]\n\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the technical summary: {e}\")\n return \"Error in generating technical summary.\"\n\n async def summarize_feedback_and_revisions(self, original_report: str, feedback: str) -> str:\n \"\"\"\n Takes the original report and user feedback and generates a revised summary.\n\n Args:\n original_report: The original summarized report.\n feedback: The user's feedback or suggested edits.\n\n Returns:\n The revised summary.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Revise the original report based on the user's feedback.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": f\"Original Report: {original_report}\"},\n {\"role\": \"user\", \"content\": f\"Feedback: {feedback}\"}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n revised_summary = response['choices'][0]['message']['content'].strip()\n\n return revised_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the revised summary: {e}\")\n return \"Error in generating revised summary\"\n\n async def summarize_non_technical_updates(self, update: str) -> str:\n \"\"\"\n Summarizes a non-technical update using a large language model.\n\n Args:\n update: The raw non-technical update provided by the user.\n\n Returns:\n The summarized non-technical 
update.\n \"\"\"\n\n # System message to guide the LLM for a concise summary\n system_message = \"Please provide a concise summary of the non-technical update shared by the user.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": update}\n ]\n\n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the non-technical summary: {e}\")\n return \"Error in generating summary\"\n\n async def summarize_goals_for_the_day(self, goals: str) -> str:\n \"\"\"\n Summarizes the user's goals for the day using a large language model.\n\n Args:\n goals: The user's raw input on their goals for the day.\n\n Returns:\n The summarized goals for the day.\n \"\"\"\n # Initiate the conversation with the model\n system_message = \"Please provide a concise summary of the user's goals for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": goals}\n ]\n \n # Specify the model engine you want to use (this is an example and can be adjusted based on your needs)\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Provide user's input and retrieve model's response\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_goals = response['choices'][0]['message']['content'].strip()\n\n # Return the summary\n return summarized_goals\n \n except Exception as e:\n print(f\"An error occurred while generating the goals summary: {e}\")\n return \"Error in generating goals summary\"\n \n async def evaluate_performance(self, user_message: str) -> str:\n \"\"\"\n Evaluates the performance of the user based on their update.\n\n Args:\n user_message: The user's message that needs to be evaluated.\n\n Returns:\n The evaluation of the user's performance.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"\"\"\n You are a project manager at a fast-paced tech startup, recognized for providing clear and actionable feedback during stand-up meetings. 
Your role is to evaluate the quality of team members' daily stand-up reports, with a focus on clear communication, comprehensive planning, and problem-solving abilities.\n It is essential to note that team members should neither be penalized nor rewarded for merely mentioning issues; instead, the emphasis should be on the clarity of the report and the quality of strategies proposed to address these issues.\n Your feedback is candid and aimed at encouraging high-quality reporting and effective planning within the startup environment.\n Please provide a two-sentence summary of the stand-up and assign a grade (A, B, C, D, or F) based on the following criteria:\n\n - A: Excellent - The report is exceptionally clear and detailed, with well-defined tasks and a thorough approach to tackling issues, exemplifying the proactive and problem-solving ethos of our startup.\n - B: Good - The report is clear and adequately detailed, outlining tasks and addressing issues with a reasonable approach, indicating a commitment to momentum and resolution.\n - C: Fair - The report is understandable but lacks detail in some areas, with a basic approach to resolving issues, suggesting a need for further strategy development.\n - D: Poor - The report is vague or missing details, with a limited or unclear approach to issues, necessitating better communication and planning skills.\n - F: Fail - The report is missing, overly vague, or lacks a coherent structure, with no apparent approach to issues, reflecting a need for significant improvement in reporting and strategizing.\n\n A comprehensive stand-up report effectively communicates what was done and what is planned, clearly identifies any issues, and connects daily tasks with broader business objectives.\n\n Provide clear and constructive feedback, aiming to foster a culture of excellence and continuous improvement in how we plan and communicate our daily activities.\n \"\"\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n performance_evaluation = response['choices'][0]['message']['content'].strip()\n\n return performance_evaluation\n \n except Exception as e:\n print(f\"An error occurred while evaluating the performance: {e}\")\n return \"Error in evaluating performance\"" }, { "identifier": "WeeklyPostManager", "path": "weekly_posts/weekly_post_manager.py", "snippet": "class WeeklyPostManager:\n \"\"\"Manages the status post in a Discord channel.\"\"\"\n \n def __init__(self, channel, weekly_posts_db: WeeklyPostsDB):\n \"\"\"\n Initializes a new WeeklyPostManager instance.\n \"\"\"\n self.channel = channel\n self.weekly_posts_db = weekly_posts_db\n self.editable_weekly_post = None\n self.load_weekly_post_data()\n\n def load_weekly_post_data(self):\n \"\"\"\n Load the weekly post data from the database.\n \n This method queries the 'weekly_posts' table to get the ID and timestamp of \n the last weekly post. 
If no data exists, it sets the ID and timestamp to None.\n \"\"\"\n data = self.weekly_posts_db.get_weekly_post_data()\n self.editable_weekly_post_id = data.get('post_id', None)\n self.weekly_post_timestamp = data.get('timestamp', None)\n\n def save_weekly_post_data(self):\n \"\"\"\n Save the weekly post data to the database.\n \n This method inserts or updates the ID and timestamp of the current weekly post \n in the 'weekly_posts' table.\n \"\"\"\n self.weekly_posts_db.save_weekly_post_data(self.editable_weekly_post.id, datetime.now())\n\n async def initialize_post(self, team_members: List[TeamMember]):\n \"\"\"\n Initializes or retrieves the weekly status post on Discord.\n\n This function checks if a valid weekly post already exists for the current week.\n If it does, it retrieves that post. Otherwise, it sends a new message in the Discord\n channel with the list of team members and their statuses.\n\n Args:\n team_members: A list of TeamMember objects to be displayed in the post.\n \"\"\"\n current_week_number = datetime.now().isocalendar()[1]\n saved_week_number = self.weekly_post_timestamp.isocalendar()[1] if self.weekly_post_timestamp else None\n\n # Skip initialization if the post already exists and is for the current week\n if self.editable_weekly_post_id and current_week_number == saved_week_number:\n self.editable_weekly_post = await self.channel.fetch_message(self.editable_weekly_post_id)\n return\n\n utc_now = pytz.utc.localize(datetime.utcnow())\n today_weekday = utc_now.weekday()\n last_monday = utc_now - timedelta(days=today_weekday)\n next_sunday = last_monday + timedelta(days=6)\n\n start_date = self.format_date(last_monday)\n end_date = self.format_date(next_sunday)\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {m.current_streak}🔥\" if m.current_streak > 0 else \"\"\n\n # Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {'❓' * 5} {streak_str}`\"\n member_list.append(new_line)\n\n member_list_str = '\\n'.join(member_list)\n\n await self.channel.send(f\"# Weekly Status Updates\")\n await self.channel.send(f\"## {start_date} to {end_date}\")\n if member_list_str:\n self.editable_weekly_post = await self.channel.send(f\"{member_list_str}\")\n self.save_weekly_post_data() # Save the ID and timestamp after creating the post\n\n async def rebuild_post(self, team_members: List[TeamMember]):\n \"\"\"\n Rebuilds the entire weekly status post from the team members' data.\n\n Args:\n team_members: A list of TeamMember objects with updated statuses and streaks.\n \"\"\"\n # If there are no team members, delete the post and return\n if not team_members:\n if self.editable_weekly_post:\n await self.editable_weekly_post.delete()\n self.editable_weekly_post = None\n return\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Get the streak and number of weekly check-ins for the member\n streak = m.current_streak\n check_ins = m.weekly_checkins\n\n # Generate the marks based on the number of check-ins\n marks = \"✅\" * check_ins + \"❓\" * (5 - check_ins)\n\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {streak}🔥\" if streak > 0 else \"\"\n\n # 
Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {marks} {streak_str}`\"\n member_list.append(new_line)\n\n new_content = '\\n'.join(member_list)\n\n # Update the existing post or create a new one if it doesn't exist\n if self.editable_weekly_post:\n self.editable_weekly_post = await self.editable_weekly_post.edit(content=new_content)\n else:\n self.editable_weekly_post = await self.channel.send(new_content)\n\n # Save the ID and timestamp of the post\n self.save_weekly_post_data()\n\n def format_date(self, dt: datetime) -> str:\n \"\"\"\n Formats a datetime object into a human-readable string.\n\n Args:\n dt: The datetime object to format.\n\n Returns:\n A human-readable date string.\n \"\"\"\n suffix = ['th', 'st', 'nd', 'rd']\n day = int(dt.strftime('%d'))\n if 4 <= day <= 20 or 24 <= day <= 30:\n suffix_index = 0 # use 'th'\n else:\n suffix_index = day % 10 # use 'st', 'nd', 'rd' as appropriate\n\n return dt.strftime(f\"%B {day}{suffix[suffix_index]}\")" }, { "identifier": "Scheduler", "path": "scheduler.py", "snippet": "class Scheduler:\n \"\"\"Scheduler class to manage timed jobs for sending status requests.\n\n Attributes:\n scheduler: The APScheduler object.\n job_ids: A dictionary to store lists of job IDs for each member.\n \"\"\"\n \n def __init__(self) -> None:\n \"\"\"Initialize the Scheduler object and start the APScheduler.\"\"\"\n self.scheduler: AsyncIOScheduler = AsyncIOScheduler()\n self.job_ids: Dict[int, List[str]] = {} # Store job IDs indexed by member's Discord ID\n self.weekly_post_job_id = None # To store the ID of the scheduled weekly post job\n self.scheduler.start()\n\n def add_job(self, func: callable, member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager) -> None:\n \"\"\"Add a new job to the scheduler for a specific team member.\n \n Args:\n func: The function to call when the job is run.\n member: The TeamMember object for whom the job is added.\n \"\"\"\n time_zone = pytz.timezone(member.time_zone)\n \n weekday_trigger = CronTrigger(day_of_week='mon,tue,wed,thu,fri', hour=10, timezone=time_zone)\n weekend_trigger = CronTrigger(day_of_week='sat,sun', hour=11, timezone=time_zone)\n\n weekday_job = self.scheduler.add_job(func, weekday_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n weekend_job = self.scheduler.add_job(func, weekend_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n\n self.job_ids.setdefault(member.discord_id, []).extend([weekday_job.id, weekend_job.id])\n\n def remove_job(self, discord_id: int) -> None:\n \"\"\"Remove jobs for a specific team member.\n \n Args:\n discord_id: The Discord ID of the member for whom the job should be removed.\n \"\"\"\n job_ids = self.job_ids.get(discord_id, [])\n for job_id in job_ids:\n self.scheduler.remove_job(job_id)\n\n if discord_id in self.job_ids:\n del self.job_ids[discord_id] # Remove the job IDs from the dictionary\n\n def schedule_weekly_post(self, func: callable, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]) -> None:\n \"\"\"Schedules the weekly post based on the latest time zone among the team members.\"\"\"\n \n # Determine the latest time zone\n latest_time_zone = max([member.time_zone for member in team_members], key=lambda tz: pytz.timezone(tz).utcoffset(datetime.utcnow()))\n\n # Set the trigger for 9:10 AM in the earliest time zone on 
Monday\n trigger = CronTrigger(day_of_week='mon', hour=9, minute=10, timezone=latest_time_zone)\n\n # Schedule the function with the trigger\n job = self.scheduler.add_job(func, trigger, args=[weekly_post_manager, streaks_manager, team_members])\n self.weekly_post_job_id = job.id\n\n def unschedule_weekly_post(self) -> None:\n \"\"\"Removes the weekly post job from the scheduler.\"\"\"\n if self.weekly_post_job_id:\n self.scheduler.remove_job(self.weekly_post_job_id)\n self.weekly_post_job_id = None\n\n def get_all_scheduled_jobs(self, team_member_manager) -> List[str]:\n \"\"\"Retrieve all scheduled jobs as a list of strings.\"\"\"\n job_descriptions = []\n\n for job in self.scheduler.get_jobs():\n # Determine the associated team member by looking up the job ID in the job_ids dictionary\n member_discord_id = next((discord_id for discord_id, job_ids in self.job_ids.items() if job.id in job_ids), None)\n member_name = team_member_manager.find_member(member_discord_id).name if member_discord_id else \"Unknown\"\n\n # Calculate the remaining time until the next run\n now = datetime.now(job.next_run_time.tzinfo) # Get the current time with the same timezone as the job's next_run_time\n remaining_time = job.next_run_time - now\n remaining_time_str = str(remaining_time).split('.')[0] # Remove the microseconds part\n\n # If this job is the weekly post job\n if job.id == self.weekly_post_job_id:\n job_descriptions.append(f\"ID: {job.id}, Type: Weekly Post, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n else:\n job_descriptions.append(f\"ID: {job.id}, Member: {member_name}, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n\n return job_descriptions" }, { "identifier": "TeamMember", "path": "team_members/team_member.py", "snippet": "class TeamMember:\n \"\"\"TeamMember class to store individual team member details.\n \n Attributes:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone in which the team member resides.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins of the team member.\n weekly_checkins: The number of check-ins for the current week.\n \"\"\"\n \n def __init__(self, discord_id: int, time_zone: str, name: str, github_username: str,\n current_streak: int = 0, weekly_checkins: int = 0, on_vacation: bool = False) -> None:\n \"\"\"Initialize a new TeamMember object.\n \n Args:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone of the team member.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins. Defaults to 0.\n weekly_checkins: The number of check-ins for the current week. 
Defaults to 0.\n \"\"\"\n self.discord_id: int = discord_id\n self.time_zone: str = time_zone\n self.name: str = name\n self.github_username: str = github_username\n self.current_streak: int = current_streak\n self.weekly_checkins: int = weekly_checkins\n self.on_vacation: bool = on_vacation\n \n def update_streak(self, streak: int) -> None:\n \"\"\"Update the current streak of the team member.\n \n Args:\n streak: The new streak count.\n \"\"\"\n self.current_streak = streak\n \n def reset_streak(self) -> None:\n \"\"\"Reset the current streak of the team member to 0.\"\"\"\n self.current_streak = 0\n\n def update_weekly_checkins(self, count: int):\n \"\"\"\n Update the weekly check-ins count.\n\n Args:\n count: The new count of weekly check-ins.\n \"\"\"\n self.weekly_checkins = count\n \n def increment_weekly_checkins(self) -> None:\n \"\"\"Increment the number of check-ins for the current week by 1.\"\"\"\n self.weekly_checkins += 1\n \n def reset_weekly_checkins(self) -> None:\n \"\"\"Reset the number of check-ins for the current week to 0.\"\"\"\n self.weekly_checkins = 0" } ]
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
15,358
if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update streaks.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert 
the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = 
[commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager): if member.weekly_checkins == 5: return # If already completed 5 check-ins, do nothing user = bot.get_user(member.discord_id) if user: # Notify the admin that a status request is being sent admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"Status request sent to {member.name}.") # Cancel the previous task if it exists ongoing_task: Task = ongoing_status_requests.get(member.discord_id) if ongoing_task: ongoing_task.cancel() # Retrieve all commit messages for the member commit_messages = get_all_commit_messages_for_user(ORG_NAME, ORG_TOKEN, member) if not commit_messages: summarized_report = "You have no commits for the previous working day." msg = f"{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." else: summarized_report = await updates_manager.summarize_technical_updates(commit_messages) msg = f"Here's your summarized report based on your commits:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." raw_updates = summarized_report # Send initial message and wait for reaction await user.send( f"# Good morning {member.name}, time for your daily status update!\n" f"### I'm first going to check your commit messages and try to build a technical report for you.\n" f"### Next I will ask you for any non-technical updates from your previous work day.\n" f"### Finally I will ask you what you plan to work on today." 
) sent_message = await user.send(msg) await sent_message.add_reaction(THUMBS_UP_EMOJI) await sent_message.add_reaction(PENCIL_EMOJI) await sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) def check(m) -> bool: return m.author == user and isinstance(m.channel, DMChannel) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, reactor = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await sent_message.remove_reaction(emoji, bot.user) while str(reaction.emoji) in [PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: if str(reaction.emoji) == PENCIL_EMOJI: await user.send("What would you like me to change?") # Store the new wait_for message (feedback) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task feedback = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the feedback # Send original + feedback to LLM for reformatting summarized_report = await updates_manager.summarize_feedback_and_revisions(summarized_report, feedback.content) elif str(reaction.emoji) == REPORT_SUBMISSION_EMOJI: await user.send("Please submit your technical report directly.") # Store the new wait_for message (report submission) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task direct_report = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the report summarized_report = direct_report.content break # Exit the while loop as the user has submitted their report directly msg = f"Here's the revised report:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." last_sent_message = await send_long_message(user, msg) if last_sent_message: await last_sent_message.add_reaction(THUMBS_UP_EMOJI) await last_sent_message.add_reaction(PENCIL_EMOJI) await last_sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == last_sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, user = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await last_sent_message.remove_reaction(emoji, bot.user) # Prompt user for non-technical updates from the previous day non_technical_msg_prompt = "Please provide any non-technical updates from your previous working day, e.g., important meetings, interviews, etc." 
await user.send(non_technical_msg_prompt) # Store the new wait_for message (non-technical update) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task non_technical_update_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the non-technical update raw_updates += f"\n\n{non_technical_update_raw.content}" # Summarize non-technical update with LLM non_technical_update = await updates_manager.summarize_non_technical_updates(non_technical_update_raw.content) # Prompt user for their goals for the day goals_msg_prompt = "What do you plan to work on or accomplish today?" await user.send(goals_msg_prompt) # Store the new wait_for message (goals for the day) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task goals_for_today_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the goals # Summarize goals for the day with LLM goals_for_today = await updates_manager.summarize_goals_for_the_day(goals_for_today_raw.content) # Update the streak for this member streak = streaks_manager.get_streak(member.discord_id) streaks_manager.update_streak(member.discord_id, streak + 1) member.update_streak(streaks_manager.get_streak(member.discord_id)) member.increment_weekly_checkins() raw_updates += f"\n\n{goals_for_today_raw.content}" final_updates = f"{summarized_report}\n\n{non_technical_update}\n\n{goals_for_today}" updates_manager.insert_status(member.discord_id, raw_updates, member.time_zone) updates_manager.update_summarized_status(member.discord_id, final_updates) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) # Member name update as a header member_update_header = f"## {member.name}'s Update:" # Compile the final report with Markdown formatting final_report = ( f"\n### Technical Update:\n" f"{summarized_report}\n" f"### Non-Technical Update:\n" f"{non_technical_update}\n" f"### Goals for Today:\n" f"{goals_for_today}" ) stand_up_feedback = await updates_manager.evaluate_performance(final_report) # Concatenate the member name update with the final report and send to the designated Discord channel complete_message = f"{member_update_header}{final_report}" guild = bot.get_guild(GUILD_TOKEN) channel_to_post_in = guild.get_channel(CHANNEL_TOKEN) await user.send(stand_up_feedback) await send_long_message(channel_to_post_in, complete_message) async def send_long_message(destination, msg): max_length = 2000 # Discord's max character limit for a message sent_messages = [] # Keep track of all messages sent while len(msg) > 0: # If the message is shorter than the max length, send it as is if len(msg) <= max_length: sent_message = await destination.send(msg) sent_messages.append(sent_message) break # The message is sent, so break out of the loop # Find the nearest newline character before the max_length split_index = msg.rfind('\n', 0, max_length) # If no newline is found, just split at max_length if split_index == -1: split_index = max_length # Split the message at the found index and send the first part part_to_send = msg[:split_index].strip() sent_message = await destination.send(part_to_send) sent_messages.append(sent_message) # Wait a bit to respect Discord's rate limits await asyncio.sleep(1) # Remove the part that was sent 
from the message msg = msg[split_index:].strip() # Return the last message sent for reaction addition return sent_messages[-1] if sent_messages else None @bot.command(name='viewscheduledjobs') async def view_scheduled_jobs(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view scheduled jobs.") return # Get all scheduled jobs using the Scheduler's method scheduled_jobs = scheduler.get_all_scheduled_jobs(team_member_manager) # Send the scheduled jobs to the admin user for job in scheduled_jobs: await ctx.send(job) @bot.command(name='statusrequest') async def status_request(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to request status.") return # Find the member object using the Discord ID member_to_request = team_member_manager.find_member(discord_id) if member_to_request: for member in team_member_manager.team_members: scheduler.remove_job(member.discord_id) scheduler.unschedule_weekly_post() # Send the status request to the member await ctx.send(f"Status request sent to user with Discord ID {discord_id}.") for member in team_member_manager.team_members: scheduler.add_job(send_status_request, member, weekly_post_manager, streaks_manager, updates_manager) scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await send_status_request(member_to_request, weekly_post_manager, streaks_manager, updates_manager) await ctx.send(f"Status request received from user with Discord ID {discord_id}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='adduser') async def add_user(ctx, discord_id: int, time_zone: str, name: str, github_username: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to add users.") return # Add the new member using team_member_manager team_member_manager.add_member(discord_id, name, time_zone, github_username) # Update the weekly post to include the new member new_member = team_member_manager.find_member(discord_id) if new_member: await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.add_job(send_status_request, new_member, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User {name} added successfully.") @bot.command(name='removeuser') async def remove_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to remove users.") return # Find the member object member_to_remove = team_member_manager.find_member(discord_id) if member_to_remove: # Remove the member from the database team_member_manager.remove_member(discord_id) # Update the weekly post to remove the member await weekly_post_manager.rebuild_post(team_member_manager.team_members) scheduler.remove_job(discord_id) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"User with Discord ID {discord_id} removed successfully.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='listusers') async 
def list_users(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to list users.") return # List users using team_member_manager users = [(member.discord_id, member.name, member.time_zone, member.github_username, member.current_streak) for member in team_member_manager.team_members] user_list = '\n'.join([f"Name: {user[1]}, Discord ID: {user[0]}, Time Zone: {user[2]}, GitHub Username: {user[3]}, Current Streak: {user[4]}" for user in users]) await ctx.send(f"List of users:\n{user_list}") @bot.command(name='updatetimezone') async def update_timezone(ctx, discord_id: int, new_time_zone: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update timezones.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the timezone in the database team_member_manager.update_member_timezone(discord_id, new_time_zone) scheduler.remove_job(discord_id) scheduler.add_job(send_status_request, member_to_update, weekly_post_manager, streaks_manager, updates_manager) scheduler.unschedule_weekly_post() scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members) await ctx.send(f"Timezone for user with Discord ID {discord_id} updated to {new_time_zone}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='updatestreak') async def update_streak(ctx, discord_id: int, new_streak: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to update streaks.") return # Find the member object using the Discord ID member_to_update = team_member_manager.find_member(discord_id) if member_to_update: # Update the streak in the database streaks_manager.update_streak(discord_id, new_streak) member_to_update.update_streak(new_streak) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized 
to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online
streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
0
2023-10-12 02:01:46+00:00
24k
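A note on the record above: its commit-aggregation code pages through the GitHub API by reading the HTTP `Link` response header and extracting the URL tagged `rel="next"`, as done by the `get_pagination_link` helper. Below is a minimal standalone sketch of that header-parsing step; the function name, example URLs, and header value are illustrative, not taken from the record.

# Minimal sketch of rel="next" pagination-link parsing, mirroring the
# get_pagination_link helper in the record above. Example header is illustrative.
def get_next_link(link_header: str | None) -> str | None:
    """Return the URL tagged rel="next" in a Link header, or None if absent."""
    if not link_header:
        return None
    for part in link_header.split(", "):
        # Each part looks like: <https://api.example.com/items?page=2>; rel="next"
        if 'rel="next"' in part:
            return part.split("; ")[0].strip("<>")
    return None

if __name__ == "__main__":
    header = (
        '<https://api.github.com/repositories/1/commits?page=2>; rel="next", '
        '<https://api.github.com/repositories/1/commits?page=7>; rel="last"'
    )
    print(get_next_link(header))  # https://api.github.com/repositories/1/commits?page=2
    print(get_next_link(None))    # None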
azuline/rose
rose/rules_test.py
[ { "identifier": "AudioTags", "path": "rose/audiotags.py", "snippet": "class AudioTags:\n id: str | None\n release_id: str | None\n title: str | None\n year: int | None\n tracknumber: str | None\n tracktotal: int | None\n discnumber: str | None\n disctotal: int | None\n album: str | None\n genre: list[str]\n label: list[str]\n releasetype: str\n\n albumartists: ArtistMapping\n trackartists: ArtistMapping\n\n duration_sec: int\n\n path: Path\n\n @classmethod\n def from_file(cls, p: Path) -> AudioTags:\n \"\"\"Read the tags of an audio file on disk.\"\"\"\n if not any(p.suffix.lower() == ext for ext in SUPPORTED_AUDIO_EXTENSIONS):\n raise UnsupportedFiletypeError(f\"{p.suffix} not a supported filetype\")\n try:\n m = mutagen.File(p) # type: ignore\n except mutagen.MutagenError as e: # type: ignore\n raise UnsupportedFiletypeError(f\"Failed to open file: {e}\") from e\n if isinstance(m, mutagen.mp3.MP3):\n # ID3 returns trackno/discno tags as no/total. We have to parse.\n tracknumber = discnumber = tracktotal = disctotal = None\n if tracknos := _get_tag(m.tags, [\"TRCK\"]):\n try:\n tracknumber, tracktotalstr = tracknos.split(\"/\", 1)\n tracktotal = _parse_int(tracktotalstr)\n except ValueError:\n tracknumber = tracknos\n if discnos := _get_tag(m.tags, [\"TPOS\"]):\n try:\n discnumber, disctotalstr = discnos.split(\"/\", 1)\n disctotal = _parse_int(disctotalstr)\n except ValueError:\n discnumber = discnos\n\n def _get_paired_frame(x: str) -> str | None:\n if not m.tags:\n return None\n for tag in [\"TIPL\", \"IPLS\"]:\n try:\n frame = m.tags[tag]\n except KeyError:\n continue\n return r\" \\\\ \".join([p[1] for p in frame.people if p[0].lower() == x.lower()])\n return None\n\n return AudioTags(\n id=_get_tag(m.tags, [\"TXXX:ROSEID\"]),\n release_id=_get_tag(m.tags, [\"TXXX:ROSERELEASEID\"]),\n title=_get_tag(m.tags, [\"TIT2\"]),\n year=_parse_year(_get_tag(m.tags, [\"TDRC\", \"TYER\"])),\n tracknumber=tracknumber,\n tracktotal=tracktotal,\n discnumber=discnumber,\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"TALB\"]),\n genre=_split_tag(_get_tag(m.tags, [\"TCON\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"TPUB\"], split=True)),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"TXXX:RELEASETYPE\"], first=True)),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"TPE2\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"TPE1\"], split=True),\n remixer=_get_tag(m.tags, [\"TPE4\"], split=True),\n composer=_get_tag(m.tags, [\"TCOM\"], split=True),\n conductor=_get_tag(m.tags, [\"TPE3\"], split=True),\n producer=_get_paired_frame(\"producer\"),\n dj=_get_paired_frame(\"DJ-mix\"),\n ),\n duration_sec=round(m.info.length),\n path=p,\n )\n if isinstance(m, mutagen.mp4.MP4):\n tracknumber = discnumber = tracktotal = disctotal = None\n with contextlib.suppress(ValueError):\n tracknumber, tracktotalstr = _get_tuple_tag(m.tags, [\"trkn\"]) # type: ignore\n tracktotal = _parse_int(tracktotalstr)\n with contextlib.suppress(ValueError):\n discnumber, disctotalstr = _get_tuple_tag(m.tags, [\"disk\"]) # type: ignore\n disctotal = _parse_int(disctotalstr)\n\n return AudioTags(\n id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:ID\"]),\n release_id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:RELEASEID\"]),\n title=_get_tag(m.tags, [\"\\xa9nam\"]),\n year=_parse_year(_get_tag(m.tags, [\"\\xa9day\"])),\n tracknumber=str(tracknumber),\n tracktotal=tracktotal,\n discnumber=str(discnumber),\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"\\xa9alb\"]),\n 
genre=_split_tag(_get_tag(m.tags, [\"\\xa9gen\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"----:com.apple.iTunes:LABEL\"], split=True)),\n releasetype=_normalize_rtype(\n _get_tag(m.tags, [\"----:com.apple.iTunes:RELEASETYPE\"], first=True)\n ),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"aART\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"\\xa9ART\"], split=True),\n remixer=_get_tag(m.tags, [\"----:com.apple.iTunes:REMIXER\"], split=True),\n producer=_get_tag(m.tags, [\"----:com.apple.iTunes:PRODUCER\"], split=True),\n composer=_get_tag(m.tags, [\"\\xa9wrt\"], split=True),\n conductor=_get_tag(m.tags, [\"----:com.apple.iTunes:CONDUCTOR\"], split=True),\n dj=_get_tag(m.tags, [\"----:com.apple.iTunes:DJMIXER\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n return AudioTags(\n id=_get_tag(m.tags, [\"roseid\"]),\n release_id=_get_tag(m.tags, [\"rosereleaseid\"]),\n title=_get_tag(m.tags, [\"title\"]),\n year=_parse_year(_get_tag(m.tags, [\"date\", \"year\"])),\n tracknumber=_get_tag(m.tags, [\"tracknumber\"], first=True),\n tracktotal=_parse_int(_get_tag(m.tags, [\"tracktotal\"], first=True)),\n discnumber=_get_tag(m.tags, [\"discnumber\"], first=True),\n disctotal=_parse_int(_get_tag(m.tags, [\"disctotal\"], first=True)),\n album=_get_tag(m.tags, [\"album\"]),\n genre=_split_tag(_get_tag(m.tags, [\"genre\"], split=True)),\n label=_split_tag(\n _get_tag(m.tags, [\"organization\", \"label\", \"recordlabel\"], split=True)\n ),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"releasetype\"], first=True)),\n albumartists=parse_artist_string(\n main=_get_tag(m.tags, [\"albumartist\"], split=True)\n ),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"artist\"], split=True),\n remixer=_get_tag(m.tags, [\"remixer\"], split=True),\n producer=_get_tag(m.tags, [\"producer\"], split=True),\n composer=_get_tag(m.tags, [\"composer\"], split=True),\n conductor=_get_tag(m.tags, [\"conductor\"], split=True),\n dj=_get_tag(m.tags, [\"djmixer\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n raise UnsupportedFiletypeError(f\"{p} is not a supported audio file\")\n\n @no_type_check\n def flush(self, *, validate: bool = True) -> None:\n \"\"\"Flush the current tags to the file on disk.\"\"\"\n m = mutagen.File(self.path)\n if not validate and \"pytest\" not in sys.modules:\n raise Exception(\"Validate can only be turned off by tests.\")\n\n self.releasetype = (self.releasetype or \"unknown\").lower()\n if validate and self.releasetype not in SUPPORTED_RELEASE_TYPES:\n raise UnsupportedTagValueTypeError(\n f\"Release type {self.releasetype} is not a supported release type.\\n\"\n f\"Supported release types: {', '.join(SUPPORTED_RELEASE_TYPES)}\"\n )\n\n if isinstance(m, mutagen.mp3.MP3):\n if m.tags is None:\n m.tags = mutagen.id3.ID3()\n\n def _write_standard_tag(key: str, value: str | None) -> None:\n m.tags.delall(key)\n frame = getattr(mutagen.id3, key)(text=value)\n if value:\n m.tags.add(frame)\n\n def _write_tag_with_description(name: str, value: str | None) -> None:\n key, desc = name.split(\":\", 1)\n # Since the ID3 tags work with the shared prefix key before `:`, manually preserve\n # the other tags with the shared prefix key.\n keep_fields = [f for f in m.tags.getall(key) if getattr(f, \"desc\", None) != desc]\n m.tags.delall(key)\n if value:\n frame = 
getattr(mutagen.id3, key)(desc=desc, text=value)\n m.tags.add(frame)\n for f in keep_fields:\n m.tags.add(f)\n\n _write_tag_with_description(\"TXXX:ROSEID\", self.id)\n _write_tag_with_description(\"TXXX:ROSERELEASEID\", self.release_id)\n _write_standard_tag(\"TIT2\", self.title)\n _write_standard_tag(\"TDRC\", str(self.year).zfill(4))\n _write_standard_tag(\"TRCK\", self.tracknumber)\n _write_standard_tag(\"TPOS\", self.discnumber)\n _write_standard_tag(\"TALB\", self.album)\n _write_standard_tag(\"TCON\", \";\".join(self.genre))\n _write_standard_tag(\"TPUB\", \";\".join(self.label))\n _write_tag_with_description(\"TXXX:RELEASETYPE\", self.releasetype)\n _write_standard_tag(\"TPE2\", format_artist_string(self.albumartists))\n _write_standard_tag(\"TPE1\", format_artist_string(self.trackartists))\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n m.tags.delall(\"TPE4\")\n m.tags.delall(\"TCOM\")\n m.tags.delall(\"TPE3\")\n # Delete all paired text frames, since these represent additional artist roles. We don't\n # want to preserve them.\n m.tags.delall(\"TIPL\")\n m.tags.delall(\"IPLS\")\n m.save()\n return\n if isinstance(m, mutagen.mp4.MP4):\n if m.tags is None:\n m.tags = mutagen.mp4.MP4Tags()\n m.tags[\"----:net.sunsetglow.rose:ID\"] = (self.id or \"\").encode()\n m.tags[\"----:net.sunsetglow.rose:RELEASEID\"] = (self.release_id or \"\").encode()\n m.tags[\"\\xa9nam\"] = self.title or \"\"\n m.tags[\"\\xa9day\"] = str(self.year).zfill(4)\n m.tags[\"\\xa9alb\"] = self.album or \"\"\n m.tags[\"\\xa9gen\"] = \";\".join(self.genre)\n m.tags[\"----:com.apple.iTunes:LABEL\"] = \";\".join(self.label).encode()\n m.tags[\"----:com.apple.iTunes:RELEASETYPE\"] = self.releasetype.encode()\n m.tags[\"aART\"] = format_artist_string(self.albumartists)\n m.tags[\"\\xa9ART\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:REMIXER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:PRODUCER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"\\xa9wrt\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:CONDUCTOR\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:DJMIXER\"]\n\n # The track and disc numbers in MP4 are a bit annoying, because they must be a\n # single-element list of 2-tuple ints. We preserve the previous tracktotal/disctotal (as\n # Rose does not care about those values), and then attempt to write our own tracknumber\n # and discnumber.\n try:\n prev_tracktotal = m.tags[\"trkn\"][0][1]\n except (KeyError, IndexError):\n prev_tracktotal = 1\n try:\n prev_disctotal = m.tags[\"disk\"][0][1]\n except (KeyError, IndexError):\n prev_disctotal = 1\n try:\n m.tags[\"trkn\"] = [(int(self.tracknumber or \"0\"), prev_tracktotal)]\n m.tags[\"disk\"] = [(int(self.discnumber or \"0\"), prev_disctotal)]\n except ValueError as e:\n raise UnsupportedTagValueTypeError(\n \"Could not write m4a trackno/discno tags: must be integers. 
\"\n f\"Got: {self.tracknumber=} / {self.discnumber=}\"\n ) from e\n\n m.save()\n return\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n if m.tags is None:\n if isinstance(m, mutagen.flac.FLAC):\n m.tags = mutagen.flac.VCFLACDict()\n elif isinstance(m, mutagen.oggvorbis.OggVorbis):\n m.tags = mutagen.oggvorbis.OggVCommentDict()\n else:\n m.tags = mutagen.oggopus.OggOpusVComment()\n assert not isinstance(m.tags, mutagen.flac.MetadataBlock)\n m.tags[\"roseid\"] = self.id or \"\"\n m.tags[\"rosereleaseid\"] = self.release_id or \"\"\n m.tags[\"title\"] = self.title or \"\"\n m.tags[\"date\"] = str(self.year).zfill(4)\n m.tags[\"tracknumber\"] = self.tracknumber or \"\"\n m.tags[\"discnumber\"] = self.discnumber or \"\"\n m.tags[\"album\"] = self.album or \"\"\n m.tags[\"genre\"] = \";\".join(self.genre)\n m.tags[\"organization\"] = \";\".join(self.label)\n m.tags[\"releasetype\"] = self.releasetype\n m.tags[\"albumartist\"] = format_artist_string(self.albumartists)\n m.tags[\"artist\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"remixer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"producer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"composer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"conductor\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"djmixer\"]\n m.save()\n return\n\n raise RoseError(f\"Impossible: unknown mutagen type: {type(m)=} ({repr(m)=})\")" }, { "identifier": "list_releases", "path": "rose/cache.py", "snippet": "def list_releases(c: Config, release_ids: list[str] | None = None) -> list[CachedRelease]:\n \"\"\"Fetch data associated with given release IDs. Pass None to fetch all.\"\"\"\n query = \"SELECT * FROM releases_view\"\n args = []\n if release_ids is not None:\n query += f\" WHERE id IN ({','.join(['?']*len(release_ids))})\"\n args = release_ids\n query += \" ORDER BY source_path\"\n with connect(c) as conn:\n cursor = conn.execute(query, args)\n releases: list[CachedRelease] = []\n for row in cursor:\n releases.append(CachedRelease.from_view(c, row))\n return releases" }, { "identifier": "list_tracks", "path": "rose/cache.py", "snippet": "def list_tracks(c: Config, track_ids: list[str] | None = None) -> list[CachedTrack]:\n \"\"\"Fetch data associated with given track IDs. Pass None to fetch all.\"\"\"\n query = \"SELECT * FROM tracks_view\"\n args = []\n if track_ids is not None:\n query += f\" WHERE id IN ({','.join(['?']*len(track_ids))})\"\n args = track_ids\n query += \" ORDER BY source_path\"\n with connect(c) as conn:\n cursor = conn.execute(query, args)\n trackrows = cursor.fetchall()\n\n release_ids = [r[\"release_id\"] for r in trackrows]\n cursor = conn.execute(\n f\"\"\"\n SELECT *\n FROM releases_view\n WHERE id IN ({','.join(['?']*len(release_ids))})\n \"\"\",\n release_ids,\n )\n releases_map: dict[str, CachedRelease] = {}\n for row in cursor:\n releases_map[row[\"id\"]] = CachedRelease.from_view(c, row)\n\n rval = []\n for row in trackrows:\n rval.append(CachedTrack.from_view(c, row, releases_map[row[\"release_id\"]]))\n return rval" }, { "identifier": "update_cache", "path": "rose/cache.py", "snippet": "def update_cache(\n c: Config,\n force: bool = False,\n # For testing.\n force_multiprocessing: bool = False,\n) -> None:\n \"\"\"\n Update the read cache to match the data for all releases in the music source directory. 
Delete\n any cached releases that are no longer present on disk.\n \"\"\"\n update_cache_for_releases(c, None, force, force_multiprocessing=force_multiprocessing)\n update_cache_evict_nonexistent_releases(c)\n update_cache_for_collages(c, None, force)\n update_cache_evict_nonexistent_collages(c)\n update_cache_for_playlists(c, None, force)\n update_cache_evict_nonexistent_playlists(c)" }, { "identifier": "Artist", "path": "rose/common.py", "snippet": "class Artist:\n name: str\n alias: bool = False\n\n def __hash__(self) -> int:\n return hash((self.name, self.alias))" }, { "identifier": "Config", "path": "rose/config.py", "snippet": "class Config:\n music_source_dir: Path\n fuse_mount_dir: Path\n cache_dir: Path\n # Maximum parallel processes for cache updates. Defaults to nproc/2.\n max_proc: int\n ignore_release_directories: list[str]\n\n # A map from parent artist -> subartists.\n artist_aliases_map: dict[str, list[str]]\n # A map from subartist -> parent artists.\n artist_aliases_parents_map: dict[str, list[str]]\n\n fuse_artists_whitelist: list[str] | None\n fuse_genres_whitelist: list[str] | None\n fuse_labels_whitelist: list[str] | None\n fuse_artists_blacklist: list[str] | None\n fuse_genres_blacklist: list[str] | None\n fuse_labels_blacklist: list[str] | None\n\n cover_art_stems: list[str]\n valid_art_exts: list[str]\n\n rename_source_files: bool\n path_templates: PathTemplateConfig\n\n stored_metadata_rules: list[MetadataRule]\n\n @classmethod\n def parse(cls, config_path_override: Path | None = None) -> Config:\n # As we parse, delete consumed values from the data dictionary. If any are left over at the\n # end of the config, warn that unknown config keys were found.\n cfgpath = config_path_override or CONFIG_PATH\n cfgtext = \"\"\n try:\n with cfgpath.open(\"r\") as fp:\n cfgtext = fp.read()\n data = tomllib.loads(cfgtext)\n except FileNotFoundError as e:\n raise ConfigNotFoundError(f\"Configuration file not found ({cfgpath})\") from e\n except tomllib.TOMLDecodeError as e:\n raise ConfigDecodeError(\n f\"Failed to decode configuration file: invalid TOML: {e}\"\n ) from e\n\n try:\n music_source_dir = Path(data[\"music_source_dir\"]).expanduser()\n del data[\"music_source_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key music_source_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for music_source_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n fuse_mount_dir = Path(data[\"fuse_mount_dir\"]).expanduser()\n del data[\"fuse_mount_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key fuse_mount_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_mount_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n cache_dir = Path(data[\"cache_dir\"]).expanduser()\n del data[\"cache_dir\"]\n except KeyError:\n cache_dir = XDG_CACHE_ROSE\n except (TypeError, ValueError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cache_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n cache_dir.mkdir(parents=True, exist_ok=True)\n\n try:\n max_proc = int(data[\"max_proc\"])\n del data[\"max_proc\"]\n if max_proc <= 0:\n raise ValueError(f\"must be a positive integer: got {max_proc}\")\n except KeyError:\n max_proc = max(1, multiprocessing.cpu_count() // 2)\n except ValueError 
as e:\n raise InvalidConfigValueError(\n f\"Invalid value for max_proc in configuration file ({cfgpath}): must be a positive integer\"\n ) from e\n\n artist_aliases_map: dict[str, list[str]] = defaultdict(list)\n artist_aliases_parents_map: dict[str, list[str]] = defaultdict(list)\n try:\n for entry in data.get(\"artist_aliases\", []):\n if not isinstance(entry[\"artist\"], str):\n raise ValueError(f\"Artists must be of type str: got {type(entry['artist'])}\")\n artist_aliases_map[entry[\"artist\"]] = entry[\"aliases\"]\n if not isinstance(entry[\"aliases\"], list):\n raise ValueError(\n f\"Aliases must be of type list[str]: got {type(entry['aliases'])}\"\n )\n for s in entry[\"aliases\"]:\n if not isinstance(s, str):\n raise ValueError(f\"Each alias must be of type str: got {type(s)}\")\n artist_aliases_parents_map[s].append(entry[\"artist\"])\n with contextlib.suppress(KeyError):\n del data[\"artist_aliases\"]\n except (ValueError, TypeError, KeyError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for artist_aliases in configuration file ({cfgpath}): must be a list of {{ artist = str, aliases = list[str] }} records\"\n ) from e\n\n try:\n fuse_artists_whitelist = data[\"fuse_artists_whitelist\"]\n del data[\"fuse_artists_whitelist\"]\n if not isinstance(fuse_artists_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_whitelist)}\")\n for s in fuse_artists_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_whitelist = data[\"fuse_genres_whitelist\"]\n del data[\"fuse_genres_whitelist\"]\n if not isinstance(fuse_genres_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_whitelist)}\")\n for s in fuse_genres_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_whitelist = data[\"fuse_labels_whitelist\"]\n del data[\"fuse_labels_whitelist\"]\n if not isinstance(fuse_labels_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_whitelist)}\")\n for s in fuse_labels_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_artists_blacklist = data[\"fuse_artists_blacklist\"]\n del data[\"fuse_artists_blacklist\"]\n if not isinstance(fuse_artists_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_blacklist)}\")\n for s in fuse_artists_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_blacklist = data[\"fuse_genres_blacklist\"]\n del 
data[\"fuse_genres_blacklist\"]\n if not isinstance(fuse_genres_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_blacklist)}\")\n for s in fuse_genres_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_blacklist = data[\"fuse_labels_blacklist\"]\n del data[\"fuse_labels_blacklist\"]\n if not isinstance(fuse_labels_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_blacklist)}\")\n for s in fuse_labels_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n if fuse_artists_whitelist and fuse_artists_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_artists_whitelist and fuse_artists_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_genres_whitelist and fuse_genres_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_genres_whitelist and fuse_genres_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_labels_whitelist and fuse_labels_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_labels_whitelist and fuse_labels_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n\n try:\n cover_art_stems = data[\"cover_art_stems\"]\n del data[\"cover_art_stems\"]\n if not isinstance(cover_art_stems, list):\n raise ValueError(f\"Must be a list[str]: got {type(cover_art_stems)}\")\n for s in cover_art_stems:\n if not isinstance(s, str):\n raise ValueError(f\"Each cover art stem must be of type str: got {type(s)}\")\n except KeyError:\n cover_art_stems = [\"folder\", \"cover\", \"art\", \"front\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cover_art_stems in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n valid_art_exts = data[\"valid_art_exts\"]\n del data[\"valid_art_exts\"]\n if not isinstance(valid_art_exts, list):\n raise ValueError(f\"Must be a list[str]: got {type(valid_art_exts)}\")\n for s in valid_art_exts:\n if not isinstance(s, str):\n raise ValueError(f\"Each art extension must be of type str: got {type(s)}\")\n except KeyError:\n valid_art_exts = [\"jpg\", \"jpeg\", \"png\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for valid_art_exts in configuration file ({cfgpath}): {e}\"\n ) from e\n\n cover_art_stems = [x.lower() for x in cover_art_stems]\n valid_art_exts = [x.lower() for x in valid_art_exts]\n\n try:\n rename_source_files = data[\"rename_source_files\"]\n del data[\"rename_source_files\"]\n if not isinstance(rename_source_files, bool):\n raise ValueError(f\"Must be a bool: got {type(rename_source_files)}\")\n except KeyError:\n rename_source_files = False\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for rename_source_files in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n ignore_release_directories = data[\"ignore_release_directories\"]\n del 
data[\"ignore_release_directories\"]\n if not isinstance(ignore_release_directories, list):\n raise ValueError(f\"Must be a list[str]: got {type(ignore_release_directories)}\")\n for s in ignore_release_directories:\n if not isinstance(s, str):\n raise ValueError(f\"Each release directory must be of type str: got {type(s)}\")\n except KeyError:\n ignore_release_directories = []\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for ignore_release_directories in configuration file ({cfgpath}): {e}\"\n ) from e\n\n stored_metadata_rules: list[MetadataRule] = []\n for d in data.get(\"stored_metadata_rules\", []):\n if not isinstance(d, dict):\n raise InvalidConfigValueError(\n f\"Invalid value in stored_metadata_rules in configuration file ({cfgpath}): list values must be a dict: got {type(d)}\"\n )\n\n try:\n matcher = d[\"matcher\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(matcher, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a string\"\n )\n\n try:\n actions = d[\"actions\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(actions, list):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings\"\n )\n for action in actions:\n if not isinstance(action, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings: got {type(action)}\"\n )\n\n try:\n stored_metadata_rules.append(MetadataRule.parse(matcher, actions))\n except RuleSyntaxError as e:\n raise InvalidConfigValueError(\n f\"Failed to parse stored_metadata_rules in configuration file ({cfgpath}): rule {d}: {e}\"\n ) from e\n if \"stored_metadata_rules\" in data:\n del data[\"stored_metadata_rules\"]\n\n # Get the potential default template before evaluating the rest.\n default_templates = deepcopy(DEFAULT_TEMPLATE_PAIR)\n with contextlib.suppress(KeyError):\n default_templates.release = PathTemplate(data[\"path_templates\"][\"default\"][\"release\"])\n del data[\"path_templates\"][\"default\"][\"release\"]\n with contextlib.suppress(KeyError):\n default_templates.track = PathTemplate(data[\"path_templates\"][\"default\"][\"track\"])\n del data[\"path_templates\"][\"default\"][\"track\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"][\"default\"]:\n del data[\"path_templates\"][\"default\"]\n\n path_templates = PathTemplateConfig.with_defaults(default_templates)\n if tmpl_config := data.get(\"path_templates\", None):\n for key in [\n \"source\",\n \"all_releases\",\n \"new_releases\",\n \"recently_added_releases\",\n \"artists\",\n \"genres\",\n \"labels\",\n \"collages\",\n ]:\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).release = PathTemplate(tmpl_config[key][\"release\"])\n del tmpl_config[key][\"release\"]\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).track = PathTemplate(tmpl_config[key][\"track\"])\n del tmpl_config[key][\"track\"]\n with contextlib.suppress(KeyError):\n if not tmpl_config[key]:\n del tmpl_config[key]\n\n with 
contextlib.suppress(KeyError):\n path_templates.playlists = PathTemplate(tmpl_config[\"playlists\"])\n del tmpl_config[\"playlists\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"]:\n del data[\"path_templates\"]\n\n try:\n path_templates.parse()\n except InvalidPathTemplateError as e:\n raise InvalidConfigValueError(\n f\"Invalid path template in configuration file ({cfgpath}) for template {e.key}: {e}\"\n ) from e\n\n if data:\n unrecognized_accessors: list[str] = []\n # Do a DFS over the data keys to assemble the map of unknown keys. State is a tuple of\n # (\"accessor\", node).\n dfs_state: deque[tuple[str, dict[str, Any]]] = deque([(\"\", data)])\n while dfs_state:\n accessor, node = dfs_state.pop()\n if isinstance(node, dict):\n for k, v in node.items():\n child_accessor = k if not accessor else f\"{accessor}.{k}\"\n dfs_state.append((child_accessor, v))\n continue\n unrecognized_accessors.append(accessor)\n logger.warning(\n f\"Unrecognized options found in configuration file: {', '.join(unrecognized_accessors)}\"\n )\n\n return Config(\n music_source_dir=music_source_dir,\n fuse_mount_dir=fuse_mount_dir,\n cache_dir=cache_dir,\n max_proc=max_proc,\n artist_aliases_map=artist_aliases_map,\n artist_aliases_parents_map=artist_aliases_parents_map,\n fuse_artists_whitelist=fuse_artists_whitelist,\n fuse_genres_whitelist=fuse_genres_whitelist,\n fuse_labels_whitelist=fuse_labels_whitelist,\n fuse_artists_blacklist=fuse_artists_blacklist,\n fuse_genres_blacklist=fuse_genres_blacklist,\n fuse_labels_blacklist=fuse_labels_blacklist,\n cover_art_stems=cover_art_stems,\n valid_art_exts=valid_art_exts,\n path_templates=path_templates,\n rename_source_files=rename_source_files,\n ignore_release_directories=ignore_release_directories,\n stored_metadata_rules=stored_metadata_rules,\n )\n\n @functools.cached_property\n def valid_cover_arts(self) -> list[str]:\n return [s + \".\" + e for s in self.cover_art_stems for e in self.valid_art_exts]\n\n @functools.cached_property\n def cache_database_path(self) -> Path:\n return self.cache_dir / \"cache.sqlite3\"\n\n @functools.cached_property\n def watchdog_pid_path(self) -> Path:\n return self.cache_dir / \"watchdog.pid\"\n\n @functools.cached_property\n def sanitized_artist_aliases_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_map.items()}\n\n @functools.cached_property\n def sanitized_artist_aliases_parents_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_parents_map.items()}" }, { "identifier": "MetadataMatcher", "path": "rose/rule_parser.py", "snippet": "class MetadataMatcher:\n # Tags to test against the pattern. 
If any tags match the pattern, the action will be ran\n # against the track.\n tags: list[Tag]\n # The pattern to test the tag against.\n pattern: MatcherPattern\n\n def __str__(self) -> str:\n r = stringify_tags(self.tags)\n r += \":\"\n r += str(self.pattern)\n return r\n\n @classmethod\n def parse(cls, raw: str) -> MetadataMatcher:\n idx = 0\n # Common arguments to feed into Syntax Error.\n err = {\"rule_name\": \"matcher\", \"rule\": raw}\n\n # First, parse the tags.\n tags: list[Tag] = []\n found_colon = False\n while True:\n for t, resolved in ALL_TAGS.items():\n if not raw[idx:].startswith(t):\n continue\n try:\n if raw[idx:][len(t)] not in [\":\", \",\"]:\n continue\n except IndexError:\n raise RuleSyntaxError(\n **err,\n index=idx + len(t),\n feedback=\"Expected to find ',' or ':', found end of string.\",\n ) from None\n tags.extend(resolved)\n idx += len(t) + 1\n found_colon = raw[idx - 1] == \":\"\n break\n else:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=f\"Invalid tag: must be one of {{{', '.join(ALL_TAGS)}}}. The next character after a tag must be ':' or ','.\",\n )\n if found_colon:\n break\n\n # Then parse the pattern.\n pattern, fwd = take(raw[idx:], \":\", including=False)\n idx += fwd\n\n # If more input is remaining, it should be optional single-character flags.\n case_insensitive = False\n if idx < len(raw) and raw[idx] == \":\":\n idx += 1\n flags, fwd = take(raw[idx:], \":\")\n if not flags:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"No flags specified: Please remove this section (by deleting the colon) or specify one of the supported flags: `i` (case insensitive).\",\n )\n for i, flag in enumerate(flags):\n if flag == \"i\":\n case_insensitive = True\n continue\n raise RuleSyntaxError(\n **err,\n index=idx + i,\n feedback=\"Unrecognized flag: Please specify one of the supported flags: `i` (case insensitive).\",\n )\n idx += fwd\n\n if raw[idx:]:\n raise RuleSyntaxError(\n **err,\n index=idx,\n feedback=\"Extra input found after end of matcher. Perhaps you meant to escape this colon?\",\n )\n\n matcher = MetadataMatcher(\n tags=tags,\n pattern=MatcherPattern(pattern=pattern, case_insensitive=case_insensitive),\n )\n logger.debug(f\"Parsed rule matcher {raw=} as {matcher=}\")\n return matcher" }, { "identifier": "MetadataRule", "path": "rose/rule_parser.py", "snippet": "class MetadataRule:\n matcher: MetadataMatcher\n actions: list[MetadataAction]\n\n def __str__(self) -> str:\n rval: list[str] = []\n rval.append(f\"matcher={shlex.quote(str(self.matcher))}\")\n for action in self.actions:\n rval.append(f\"action={shlex.quote(str(action))}\")\n return \" \".join(rval)\n\n @classmethod\n def parse(cls, matcher: str, actions: list[str]) -> MetadataRule:\n parsed_matcher = MetadataMatcher.parse(matcher)\n return MetadataRule(\n matcher=parsed_matcher,\n actions=[MetadataAction.parse(a, i + 1, parsed_matcher) for i, a in enumerate(actions)],\n )" }, { "identifier": "FastSearchResult", "path": "rose/rules.py", "snippet": "class FastSearchResult:\n id: str\n path: Path" }, { "identifier": "TrackTagNotAllowedError", "path": "rose/rules.py", "snippet": "class TrackTagNotAllowedError(RoseExpectedError):\n pass" }, { "identifier": "execute_metadata_rule", "path": "rose/rules.py", "snippet": "def execute_metadata_rule(\n c: Config,\n rule: MetadataRule,\n *,\n dry_run: bool = False,\n confirm_yes: bool = False,\n enter_number_to_confirm_above_count: int = 25,\n) -> None:\n \"\"\"\n This function executes a metadata update rule. 
It runs in five parts:\n\n 1. Run a search query on our Full Text Search index. This is far more performant than the SQL\n LIKE operation; however, it is also less precise. It produces false positives, but should not\n produce false negatives. So we then run:\n 2. Read the files returned from the search query and remove all false positives.\n 3. We then run the actions on each valid matched file and store all the intended changes\n in-memory. No changes are written to disk.\n 4. We then prompt the user to confirm the changes, assuming confirm_yes is True.\n 5. We then flush the intended changes to disk.\n \"\"\"\n # Newline for appearance.\n click.echo()\n fast_search_results = fast_search_for_matching_tracks(c, rule.matcher)\n if not fast_search_results:\n click.secho(\"No matching tracks found\", dim=True, italic=True)\n click.echo()\n return\n # If there are more than 400 tracks matched, first filter the matched tracks using the cache,\n # has a sublinear time complexity (but higher baseline). Only then run the tag filter, which has\n # linear time complexity.\n if len(fast_search_results) > 400:\n time_start = time.time()\n tracks = list_tracks(c, [t.id for t in fast_search_results])\n logger.debug(\n f\"Fetched tracks from cache for filtering in {time.time() - time_start} seconds\"\n )\n tracks = filter_track_false_positives_using_read_cache(rule.matcher, tracks)\n track_ids = {x.id for x in tracks}\n fast_search_results = [t for t in fast_search_results if t.id in track_ids]\n if not fast_search_results:\n click.secho(\"No matching tracks found\", dim=True, italic=True)\n click.echo()\n return\n\n matcher_audiotags = filter_track_false_positives_using_tags(rule.matcher, fast_search_results)\n if not matcher_audiotags:\n click.secho(\"No matching tracks found\", dim=True, italic=True)\n click.echo()\n return\n execute_metadata_actions(\n c,\n rule.actions,\n matcher_audiotags,\n dry_run=dry_run,\n confirm_yes=confirm_yes,\n enter_number_to_confirm_above_count=enter_number_to_confirm_above_count,\n )" }, { "identifier": "execute_stored_metadata_rules", "path": "rose/rules.py", "snippet": "def execute_stored_metadata_rules(\n c: Config,\n *,\n dry_run: bool = False,\n confirm_yes: bool = False,\n) -> None:\n for rule in c.stored_metadata_rules:\n click.secho(f\"Executing stored metadata rule {rule}\", dim=True)\n execute_metadata_rule(c, rule, dry_run=dry_run, confirm_yes=confirm_yes)" }, { "identifier": "fast_search_for_matching_releases", "path": "rose/rules.py", "snippet": "def fast_search_for_matching_releases(\n c: Config,\n matcher: MetadataMatcher,\n) -> list[FastSearchResult]:\n \"\"\"Basically the same thing as fast_search_for_matching_tracks but with releases.\"\"\"\n time_start = time.time()\n if track_tags := [t for t in matcher.tags if t not in RELEASE_TAGS]:\n # But allow an exception if both trackartist and albumartist are defined: means a shorthand\n # was used. 
Just ignore trackartist.\n if any(t.startswith(\"albumartist\") for t in matcher.tags):\n track_tags = [t for t in track_tags if not t.startswith(\"trackartist\")]\n else:\n raise TrackTagNotAllowedError(\n f\"Track tags are not allowed when matching against releases: {', '.join(track_tags)}\"\n )\n\n matchsql = _convert_matcher_to_fts_query(matcher.pattern)\n logger.debug(f\"Converted match {matcher=} to {matchsql=}\")\n columns = uniq([TAG_ROLE_REGEX.sub(\"\", t) for t in matcher.tags])\n ftsquery = f\"{{{' '.join(columns)}}} : {matchsql}\"\n query = f\"\"\"\n SELECT DISTINCT r.id, r.source_path\n FROM rules_engine_fts\n JOIN tracks t ON rules_engine_fts.rowid = t.rowid\n JOIN releases r ON r.id = t.release_id\n WHERE rules_engine_fts MATCH '{ftsquery}'\n ORDER BY r.source_path\n \"\"\"\n logger.debug(f\"Constructed matching query {query}\")\n results: list[FastSearchResult] = []\n with connect(c) as conn:\n for row in conn.execute(query):\n results.append(FastSearchResult(id=row[\"id\"], path=Path(row[\"source_path\"]).resolve()))\n logger.debug(\n f\"Matched {len(results)} releases from the read cache in {time.time() - time_start} seconds\"\n )\n return results" }, { "identifier": "fast_search_for_matching_tracks", "path": "rose/rules.py", "snippet": "def fast_search_for_matching_tracks(\n c: Config,\n matcher: MetadataMatcher,\n) -> list[FastSearchResult]:\n \"\"\"\n Run a search for tracks with the matcher on the Full Text Search index. This is _fast_, but will\n produce false positives. The caller must filter out the false positives after pulling the\n results.\n \"\"\"\n time_start = time.time()\n matchsql = _convert_matcher_to_fts_query(matcher.pattern)\n logger.debug(f\"Converted match {matcher=} to {matchsql=}\")\n\n # Build the query to fetch a superset of tracks to attempt to execute the rules against. Note\n # that we directly use string interpolation here instead of prepared queries, because we are\n # constructing a complex match string and everything is escaped and spaced-out with a random\n # paragraph character, so there's no risk of SQL being interpreted.\n #\n # Remove the \"artist role\" from the tag, as we do not track the role information in the FTS\n # table. The false positives should be minimal enough that performance should be roughly the\n # same if we filter them out in the tag checking step.\n columns = uniq([TAG_ROLE_REGEX.sub(\"\", t) for t in matcher.tags])\n ftsquery = f\"{{{' '.join(columns)}}} : {matchsql}\"\n query = f\"\"\"\n SELECT DISTINCT t.id, t.source_path\n FROM rules_engine_fts\n JOIN tracks t ON rules_engine_fts.rowid = t.rowid\n WHERE rules_engine_fts MATCH '{ftsquery}'\n ORDER BY t.source_path\n \"\"\"\n logger.debug(f\"Constructed matching query {query}\")\n # And then execute the SQL query. Note that we don't pull the tag values here. This query is\n # only used to identify the matching tracks. 
Afterwards, we will read each track's tags from\n # disk and apply the action on those tag values.\n results: list[FastSearchResult] = []\n with connect(c) as conn:\n for row in conn.execute(query):\n results.append(\n FastSearchResult(\n id=row[\"id\"],\n path=Path(row[\"source_path\"]).resolve(),\n )\n )\n logger.debug(\n f\"Matched {len(results)} tracks from the read cache in {time.time() - time_start} seconds\"\n )\n return results" }, { "identifier": "filter_release_false_positives_using_read_cache", "path": "rose/rules.py", "snippet": "def filter_release_false_positives_using_read_cache(\n matcher: MetadataMatcher,\n releases: list[CachedRelease],\n) -> list[CachedRelease]:\n time_start = time.time()\n rval = []\n for r in releases:\n for field in matcher.tags:\n match = False\n # Only attempt to match the release tags; ignore track tags.\n # fmt: off\n match = match or (field == \"year\" and matches_pattern(matcher.pattern, r.year)) \n match = match or (field == \"albumtitle\" and matches_pattern(matcher.pattern, r.albumtitle)) \n match = match or (field == \"releasetype\" and matches_pattern(matcher.pattern, r.releasetype)) \n match = match or (field == \"genre\" and any(matches_pattern(matcher.pattern, x) for x in r.genres)) \n match = match or (field == \"label\" and any(matches_pattern(matcher.pattern, x) for x in r.labels)) \n match = match or (field == \"albumartist[main]\" and any(matches_pattern(matcher.pattern, x.name) for x in r.albumartists.main)) \n match = match or (field == \"albumartist[guest]\" and any(matches_pattern(matcher.pattern, x.name) for x in r.albumartists.guest)) \n match = match or (field == \"albumartist[remixer]\" and any(matches_pattern(matcher.pattern, x.name) for x in r.albumartists.remixer)) \n match = match or (field == \"albumartist[producer]\" and any(matches_pattern(matcher.pattern, x.name) for x in r.albumartists.producer)) \n match = match or (field == \"albumartist[composer]\" and any(matches_pattern(matcher.pattern, x.name) for x in r.albumartists.composer)) \n match = match or (field == \"albumartist[djmixer]\" and any(matches_pattern(matcher.pattern, x.name) for x in r.albumartists.djmixer)) \n # fmt: on\n if match:\n rval.append(r)\n break\n logger.debug(\n f\"Filtered {len(releases)} releases down to {len(rval)} releases in {time.time() - time_start} seconds\"\n )\n return rval" }, { "identifier": "filter_track_false_positives_using_read_cache", "path": "rose/rules.py", "snippet": "def filter_track_false_positives_using_read_cache(\n matcher: MetadataMatcher,\n tracks: list[CachedTrack],\n) -> list[CachedTrack]:\n time_start = time.time()\n rval = []\n for t in tracks:\n for field in matcher.tags:\n match = False\n # fmt: off\n match = match or (field == \"tracktitle\" and matches_pattern(matcher.pattern, t.tracktitle)) \n match = match or (field == \"year\" and matches_pattern(matcher.pattern, t.release.year)) \n match = match or (field == \"tracknumber\" and matches_pattern(matcher.pattern, t.tracknumber)) \n match = match or (field == \"tracktotal\" and matches_pattern(matcher.pattern, t.tracktotal)) \n match = match or (field == \"discnumber\" and matches_pattern(matcher.pattern, t.discnumber)) \n match = match or (field == \"disctotal\" and matches_pattern(matcher.pattern, t.disctotal)) \n match = match or (field == \"albumtitle\" and matches_pattern(matcher.pattern, t.release.albumtitle)) \n match = match or (field == \"releasetype\" and matches_pattern(matcher.pattern, t.release.releasetype)) \n match = match or (field == 
\"genre\" and any(matches_pattern(matcher.pattern, x) for x in t.release.genres)) \n match = match or (field == \"label\" and any(matches_pattern(matcher.pattern, x) for x in t.release.labels)) \n match = match or (field == \"trackartist[main]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.trackartists.main)) \n match = match or (field == \"trackartist[guest]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.trackartists.guest)) \n match = match or (field == \"trackartist[remixer]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.trackartists.remixer)) \n match = match or (field == \"trackartist[producer]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.trackartists.producer)) \n match = match or (field == \"trackartist[composer]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.trackartists.composer)) \n match = match or (field == \"trackartist[djmixer]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.trackartists.djmixer)) \n match = match or (field == \"albumartist[main]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.release.albumartists.main)) \n match = match or (field == \"albumartist[guest]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.release.albumartists.guest)) \n match = match or (field == \"albumartist[remixer]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.release.albumartists.remixer)) \n match = match or (field == \"albumartist[producer]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.release.albumartists.producer)) \n match = match or (field == \"albumartist[composer]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.release.albumartists.composer)) \n match = match or (field == \"albumartist[djmixer]\" and any(matches_pattern(matcher.pattern, x.name) for x in t.release.albumartists.djmixer)) \n # fmt: on\n if match:\n rval.append(t)\n break\n logger.debug(\n f\"Filtered {len(tracks)} tracks down to {len(rval)} tracks in {time.time() - time_start} seconds\"\n )\n return rval" } ]
import dataclasses import pytest from pathlib import Path from typing import Any from unittest.mock import Mock from rose.audiotags import AudioTags from rose.cache import ( list_releases, list_tracks, update_cache, ) from rose.common import Artist from rose.config import Config from rose.rule_parser import MetadataMatcher, MetadataRule from rose.rules import ( FastSearchResult, TrackTagNotAllowedError, execute_metadata_rule, execute_stored_metadata_rules, fast_search_for_matching_releases, fast_search_for_matching_tracks, filter_release_false_positives_using_read_cache, filter_track_false_positives_using_read_cache, )
16537
execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Pop", "lalala"] def test_action_on_different_tag(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:A Cool Label", ["genre::replace:hi"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["hi"] def test_action_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["matched:::sed:P:B"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Bop", "Bop"] def test_chained_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse( "label:A Cool Label", [ "replace:Jennie", "label:^Jennie$::replace:Jisoo", "label:nomatch::replace:Rose", "genre::replace:haha", ], ) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["Jisoo"] assert af.genre == ["haha"] @pytest.mark.timeout(2) def test_confirmation_yes(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.confirm", lambda *_, **__: True) execute_metadata_rule(config, rule, confirm_yes=True) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" @pytest.mark.timeout(2) def test_confirmation_no(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.confirm", lambda *_, **__: False) execute_metadata_rule(config, rule, confirm_yes=True) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" @pytest.mark.timeout(2) def test_confirmation_count(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.prompt", Mock(side_effect=["no", "8", "6"])) # Abort. execute_metadata_rule(config, rule, confirm_yes=True, enter_number_to_confirm_above_count=1) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Success in two arguments. 
execute_metadata_rule(config, rule, confirm_yes=True, enter_number_to_confirm_above_count=1) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_dry_run(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) execute_metadata_rule(config, rule, dry_run=True, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" def test_run_stored_rules(config: Config, source_dir: Path) -> None: config = dataclasses.replace( config, stored_metadata_rules=[MetadataRule.parse("tracktitle:Track", ["replace:lalala"])], ) execute_stored_metadata_rules(config) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" @pytest.mark.usefixtures("seeded_cache") def test_fast_search_for_matching_releases(config: Config) -> None: results = fast_search_for_matching_releases( config, MetadataMatcher.parse("albumartist:Techno Man") ) assert results == [FastSearchResult(id="r1", path=config.music_source_dir / "r1")] @pytest.mark.usefixtures("seeded_cache") def test_fast_search_for_matching_releases_invalid_tag(config: Config) -> None: with pytest.raises(TrackTagNotAllowedError): fast_search_for_matching_releases(config, MetadataMatcher.parse("tracktitle:x")) with pytest.raises(TrackTagNotAllowedError): fast_search_for_matching_releases(config, MetadataMatcher.parse("trackartist:x")) # But allow artist tag: fast_search_for_matching_releases(config, MetadataMatcher.parse("artist:x")) @pytest.mark.usefixtures("seeded_cache") def test_filter_release_false_positives_with_read_cache(config: Config) -> None: matcher = MetadataMatcher.parse("albumartist:^Man") fsresults = fast_search_for_matching_releases(config, matcher) assert len(fsresults) == 2 cacheresults = list_releases(config, [r.id for r in fsresults]) assert len(cacheresults) == 2 filteredresults = filter_release_false_positives_using_read_cache(matcher, cacheresults) assert not filteredresults @pytest.mark.usefixtures("seeded_cache") def test_filter_track_false_positives_with_read_cache(config: Config) -> None: matcher = MetadataMatcher.parse("trackartist:^Man") fsresults = fast_search_for_matching_tracks(config, matcher) assert len(fsresults) == 3
def test_rules_execution_match_substring(config: Config, source_dir: Path) -> None: # No match rule = MetadataRule.parse("tracktitle:bbb", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Match rule = MetadataRule.parse("tracktitle:rack", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_execution_match_beginnning(config: Config, source_dir: Path) -> None: # No match rule = MetadataRule.parse("tracktitle:^rack", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Match rule = MetadataRule.parse("tracktitle:^Track", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_execution_match_end(config: Config, source_dir: Path) -> None: # No match rule = MetadataRule.parse("tracktitle:rack$", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Match rule = MetadataRule.parse("tracktitle:rack 1$", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_execution_match_superstrict(config: Config, source_dir: Path) -> None: # No match rule = MetadataRule.parse("tracktitle:^Track $", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Match rule = MetadataRule.parse("tracktitle:^Track 1$", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_execution_match_case_insensitive(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:tRaCk:i", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_rules_fields_match_tracktitle(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "8" def test_rules_fields_match_year(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("year:1990", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.year == 8 def test_rules_fields_match_releasetype(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("releasetype:album", ["replace:live"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.releasetype == "live" def test_rules_fields_match_tracknumber(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracknumber:1", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test 
Release 1" / "01.m4a") assert af.tracknumber == "8" def test_rules_fields_match_tracktotal(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktotal:2", ["tracktitle::replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "8" def test_rules_fields_match_discnumber(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("discnumber:1", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.discnumber == "8" def test_rules_fields_match_disctotal(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("disctotal:1", ["tracktitle::replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "8" def test_rules_fields_match_albumtitle(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("albumtitle:Love Blackpink", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.album == "8" def test_rules_fields_match_genre(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["8", "Pop"] def test_rules_fields_match_label(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:Cool", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["8"] def test_rules_fields_match_albumartist(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("albumartist:BLACKPINK", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.albumartists.main == [Artist("8")] def test_rules_fields_match_trackartist(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("trackartist:BLACKPINK", ["replace:8"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.trackartists.main == [Artist("8")] def test_match_backslash(config: Config, source_dir: Path) -> None: af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") af.title = r"X \\ Y" af.flush() update_cache(config) rule = MetadataRule.parse(r"tracktitle: \\\\ ", [r"sed: \\\\\\\\ : / "]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "X / Y" def test_action_replace_with_delimiter(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["replace:Hip-Hop;Rap"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["Hip-Hop", "Rap", "Pop"] def test_action_replace_with_delimiters_empty_str(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["matched:::replace:Hip-Hop;;;;"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["Hip-Hop"] def test_sed_action(config: Config, source_dir: Path) -> None: rule = 
MetadataRule.parse("tracktitle:Track", ["sed:ack:ip"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "Trip 1" def test_sed_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:P", [r"matched:::sed:^(.*)$:i\\1"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["iK-Pop", "iPop"] def test_split_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:Cool", ["split:Cool"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["A", "Label"] def test_split_action_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["matched:::split:P"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-", "op"] def test_add_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:Cool", ["add:Even Cooler Label"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["A Cool Label", "Even Cooler Label"] def test_delete_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:^Pop$", ["delete"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Pop"] def test_delete_action_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:^Pop$", ["matched:::delete"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == [] def test_preserves_unmatched_multitags(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:^Pop$", ["replace:lalala"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Pop", "lalala"] def test_action_on_different_tag(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("label:A Cool Label", ["genre::replace:hi"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["hi"] def test_action_no_pattern(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("genre:K-Pop", ["matched:::sed:P:B"]) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.genre == ["K-Bop", "Bop"] def test_chained_action(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse( "label:A Cool Label", [ "replace:Jennie", "label:^Jennie$::replace:Jisoo", "label:nomatch::replace:Rose", "genre::replace:haha", ], ) execute_metadata_rule(config, rule, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.label == ["Jisoo"] assert af.genre == ["haha"] @pytest.mark.timeout(2) def test_confirmation_yes(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.confirm", lambda *_, **__: True) execute_metadata_rule(config, rule, confirm_yes=True) af = 
AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" @pytest.mark.timeout(2) def test_confirmation_no(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.confirm", lambda *_, **__: False) execute_metadata_rule(config, rule, confirm_yes=True) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" @pytest.mark.timeout(2) def test_confirmation_count(monkeypatch: Any, config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) monkeypatch.setattr("rose.rules.click.prompt", Mock(side_effect=["no", "8", "6"])) # Abort. execute_metadata_rule(config, rule, confirm_yes=True, enter_number_to_confirm_above_count=1) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" # Success in two arguments. execute_metadata_rule(config, rule, confirm_yes=True, enter_number_to_confirm_above_count=1) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" def test_dry_run(config: Config, source_dir: Path) -> None: rule = MetadataRule.parse("tracktitle:Track", ["replace:lalala"]) execute_metadata_rule(config, rule, dry_run=True, confirm_yes=False) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title != "lalala" def test_run_stored_rules(config: Config, source_dir: Path) -> None: config = dataclasses.replace( config, stored_metadata_rules=[MetadataRule.parse("tracktitle:Track", ["replace:lalala"])], ) execute_stored_metadata_rules(config) af = AudioTags.from_file(source_dir / "Test Release 1" / "01.m4a") assert af.title == "lalala" @pytest.mark.usefixtures("seeded_cache") def test_fast_search_for_matching_releases(config: Config) -> None: results = fast_search_for_matching_releases( config, MetadataMatcher.parse("albumartist:Techno Man") ) assert results == [FastSearchResult(id="r1", path=config.music_source_dir / "r1")] @pytest.mark.usefixtures("seeded_cache") def test_fast_search_for_matching_releases_invalid_tag(config: Config) -> None: with pytest.raises(TrackTagNotAllowedError): fast_search_for_matching_releases(config, MetadataMatcher.parse("tracktitle:x")) with pytest.raises(TrackTagNotAllowedError): fast_search_for_matching_releases(config, MetadataMatcher.parse("trackartist:x")) # But allow artist tag: fast_search_for_matching_releases(config, MetadataMatcher.parse("artist:x")) @pytest.mark.usefixtures("seeded_cache") def test_filter_release_false_positives_with_read_cache(config: Config) -> None: matcher = MetadataMatcher.parse("albumartist:^Man") fsresults = fast_search_for_matching_releases(config, matcher) assert len(fsresults) == 2 cacheresults = list_releases(config, [r.id for r in fsresults]) assert len(cacheresults) == 2 filteredresults = filter_release_false_positives_using_read_cache(matcher, cacheresults) assert not filteredresults @pytest.mark.usefixtures("seeded_cache") def test_filter_track_false_positives_with_read_cache(config: Config) -> None: matcher = MetadataMatcher.parse("trackartist:^Man") fsresults = fast_search_for_matching_tracks(config, matcher) assert len(fsresults) == 3
tracks = list_tracks(config, [r.id for r in fsresults])
2
2023-10-09 14:42:23+00:00
24k
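The record above documents a two-phase matching pattern in rose.rules: a fast but imprecise full-text-search query over the cache, followed by an exact filter against the read cache to drop false positives. The following is a minimal sketch of that pattern, assembled only from the functions shown in this record's context snippets and import statement; the helper name find_tracks and the ready-made config object are assumptions for illustration, not part of the record.

# Minimal sketch of the fast-search + read-cache-filter flow shown in this record.
# `find_tracks` is a hypothetical wrapper; `config` is assumed to be a rose Config
# whose read cache has already been populated (e.g. via update_cache).
from rose.cache import list_tracks
from rose.config import Config
from rose.rule_parser import MetadataMatcher
from rose.rules import (
    fast_search_for_matching_tracks,
    filter_track_false_positives_using_read_cache,
)


def find_tracks(config: Config, matcher_str: str):
    matcher = MetadataMatcher.parse(matcher_str)
    # Phase 1: fast FTS query; per the docstring, this may return false positives.
    fsresults = fast_search_for_matching_tracks(config, matcher)
    # Phase 2: pull the candidate tracks from the read cache (this is the record's
    # gold next_line) and keep only the ones whose tags truly match the pattern.
    tracks = list_tracks(config, [r.id for r in fsresults])
    return filter_track_false_positives_using_read_cache(matcher, tracks)

The split mirrors the snippets' stated design: the FTS index gives speed while ignoring artist roles, and the read-cache filter restores exactness without re-reading tags from disk.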
zhaoyizhou1123/mbrcsl
examples/roboverse/run_combo_roboverse.py
[ { "identifier": "MLP", "path": "offlinerlkit/nets/mlp.py", "snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n output_dim: Optional[int] = None,\n activation: nn.Module = nn.ReLU,\n dropout_rate: Optional[float] = None,\n init_last: bool = False\n ) -> None:\n super().__init__()\n hidden_dims = [input_dim] + list(hidden_dims)\n model = []\n for in_dim, out_dim in zip(hidden_dims[:-1], hidden_dims[1:]):\n model += [nn.Linear(in_dim, out_dim), activation()]\n if dropout_rate is not None:\n model += [nn.Dropout(p=dropout_rate)]\n\n self.output_dim = hidden_dims[-1]\n if output_dim is not None:\n last_layer = nn.Linear(hidden_dims[-1], output_dim)\n if init_last:\n nn.init.xavier_uniform_(last_layer.weight, gain=1e-2)\n nn.init.constant_(last_layer.bias, 0.0)\n model += [last_layer]\n self.output_dim = output_dim\n self.model = nn.Sequential(*model)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.model(x)" }, { "identifier": "ActorProb", "path": "offlinerlkit/modules/actor_module.py", "snippet": "class ActorProb(nn.Module):\n def __init__(\n self,\n backbone: nn.Module,\n dist_net: nn.Module,\n device: str = \"cpu\"\n ) -> None:\n super().__init__()\n\n self.device = torch.device(device)\n self.backbone = backbone.to(device)\n self.dist_net = dist_net.to(device)\n\n def forward(self, obs: Union[np.ndarray, torch.Tensor]) -> torch.distributions.Normal:\n obs = torch.as_tensor(obs, device=self.device, dtype=torch.float32)\n logits = self.backbone(obs)\n dist = self.dist_net(logits)\n return dist" }, { "identifier": "Critic", "path": "offlinerlkit/modules/critic_module.py", "snippet": "class Critic(nn.Module):\n def __init__(self, backbone: nn.Module, device: str = \"cpu\") -> None:\n super().__init__()\n\n self.device = torch.device(device)\n self.backbone = backbone.to(device)\n latent_dim = getattr(backbone, \"output_dim\")\n self.last = nn.Linear(latent_dim, 1).to(device)\n\n def forward(\n self,\n obs: Union[np.ndarray, torch.Tensor],\n actions: Optional[Union[np.ndarray, torch.Tensor]] = None\n ) -> torch.Tensor:\n obs = torch.as_tensor(obs, device=self.device, dtype=torch.float32)\n if actions is not None:\n actions = torch.as_tensor(actions, device=self.device, dtype=torch.float32).flatten(1)\n obs = torch.cat([obs, actions], dim=1)\n logits = self.backbone(obs)\n values = self.last(logits)\n return values" }, { "identifier": "TanhDiagGaussian", "path": "offlinerlkit/modules/dist_module.py", "snippet": "class TanhDiagGaussian(DiagGaussian):\n def __init__(\n self,\n latent_dim,\n output_dim,\n unbounded=False,\n conditioned_sigma=False,\n max_mu=1.0,\n sigma_min=-5.0,\n sigma_max=2.0\n ):\n super().__init__(\n latent_dim=latent_dim,\n output_dim=output_dim,\n unbounded=unbounded,\n conditioned_sigma=conditioned_sigma,\n max_mu=max_mu,\n sigma_min=sigma_min,\n sigma_max=sigma_max\n )\n\n def forward(self, logits):\n mu = self.mu(logits)\n if not self._unbounded:\n mu = self._max * torch.tanh(mu)\n if self._c_sigma:\n sigma = torch.clamp(self.sigma(logits), min=self._sigma_min, max=self._sigma_max).exp()\n else:\n shape = [1] * len(mu.shape)\n shape[1] = -1\n sigma = (self.sigma_param.view(shape) + torch.zeros_like(mu)).exp()\n return TanhNormalWrapper(mu, sigma)" }, { "identifier": "EnsembleDynamicsModel", "path": "offlinerlkit/modules/dynamics_module.py", "snippet": "class EnsembleDynamicsModel(nn.Module):\n def __init__(\n self,\n obs_dim: int,\n action_dim: int,\n hidden_dims: Union[List[int], 
Tuple[int]],\n num_ensemble: int = 7,\n num_elites: int = 5,\n activation: nn.Module = Swish,\n weight_decays: Optional[Union[List[float], Tuple[float]]] = None,\n with_reward: bool = True,\n device: str = \"cpu\"\n ) -> None:\n super().__init__()\n\n self.num_ensemble = num_ensemble\n self.num_elites = num_elites\n self._with_reward = with_reward\n self.device = torch.device(device)\n\n self.activation = activation()\n\n assert len(weight_decays) == (len(hidden_dims) + 1)\n\n module_list = []\n hidden_dims = [obs_dim+action_dim] + list(hidden_dims)\n if weight_decays is None:\n weight_decays = [0.0] * (len(hidden_dims) + 1)\n for in_dim, out_dim, weight_decay in zip(hidden_dims[:-1], hidden_dims[1:], weight_decays[:-1]):\n module_list.append(EnsembleLinear(in_dim, out_dim, num_ensemble, weight_decay))\n self.backbones = nn.ModuleList(module_list)\n\n self.output_layer = EnsembleLinear(\n hidden_dims[-1],\n 2 * (obs_dim + self._with_reward),\n num_ensemble,\n weight_decays[-1]\n )\n\n self.register_parameter(\n \"max_logvar\",\n nn.Parameter(torch.ones(obs_dim + self._with_reward) * 0.5, requires_grad=True)\n )\n self.register_parameter(\n \"min_logvar\",\n nn.Parameter(torch.ones(obs_dim + self._with_reward) * -10, requires_grad=True)\n )\n\n self.register_parameter(\n \"elites\",\n nn.Parameter(torch.tensor(list(range(0, self.num_elites))), requires_grad=False)\n )\n\n self.to(self.device)\n\n def forward(self, obs_action: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:\n obs_action = torch.as_tensor(obs_action, dtype=torch.float32).to(self.device)\n output = obs_action\n for layer in self.backbones:\n output = self.activation(layer(output))\n mean, logvar = torch.chunk(self.output_layer(output), 2, dim=-1)\n logvar = soft_clamp(logvar, self.min_logvar, self.max_logvar)\n return mean, logvar\n\n def load_save(self) -> None:\n for layer in self.backbones:\n layer.load_save()\n self.output_layer.load_save()\n\n def update_save(self, indexes: List[int]) -> None:\n for layer in self.backbones:\n layer.update_save(indexes)\n self.output_layer.update_save(indexes)\n \n def get_decay_loss(self) -> torch.Tensor:\n decay_loss = 0\n for layer in self.backbones:\n decay_loss += layer.get_decay_loss()\n decay_loss += self.output_layer.get_decay_loss()\n return decay_loss\n\n def set_elites(self, indexes: List[int]) -> None:\n assert len(indexes) <= self.num_ensemble and max(indexes) < self.num_ensemble\n self.register_parameter('elites', nn.Parameter(torch.tensor(indexes), requires_grad=False))\n \n def random_elite_idxs(self, batch_size: int) -> np.ndarray:\n idxs = np.random.choice(self.elites.data.cpu().numpy(), size=batch_size)\n return idxs" }, { "identifier": "EnsembleDynamics", "path": "offlinerlkit/dynamics/ensemble_dynamics.py", "snippet": "class EnsembleDynamics(BaseDynamics):\n def __init__(\n self,\n model: nn.Module,\n optim: torch.optim.Optimizer,\n scaler: StandardScaler,\n terminal_fn: Callable[[np.ndarray, np.ndarray, np.ndarray], np.ndarray],\n penalty_coef: float = 0.0,\n uncertainty_mode: str = \"aleatoric\"\n ) -> None:\n super().__init__(model, optim)\n self.scaler = scaler\n self.terminal_fn = terminal_fn\n self._penalty_coef = penalty_coef\n self._uncertainty_mode = uncertainty_mode\n\n @ torch.no_grad()\n def step(\n self,\n obs: np.ndarray,\n action: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Dict]:\n '''\n Return:\n reward (B,1) (if obs has batch)\n terminal (B,1)\n '''\n \"imagine single forward step\"\n obs_act = np.concatenate([obs, action], 
axis=-1)\n obs_act = self.scaler.transform(obs_act)\n mean, logvar = self.model(obs_act)\n mean = mean.cpu().numpy()\n logvar = logvar.cpu().numpy()\n mean[..., :-1] += obs # We estimated delta_obs\n std = np.sqrt(np.exp(logvar))\n\n ensemble_samples = (mean + np.random.normal(size=mean.shape) * std).astype(np.float32)\n\n # choose one model from ensemble\n num_models, batch_size, _ = ensemble_samples.shape\n model_idxs = self.model.random_elite_idxs(batch_size)\n samples = ensemble_samples[model_idxs, np.arange(batch_size)]\n \n next_obs = samples[..., :-1]\n reward = samples[..., -1:]\n terminal = self.terminal_fn(obs, action, next_obs)\n info = {}\n info[\"raw_reward\"] = reward\n\n if self._penalty_coef:\n if self._uncertainty_mode == \"aleatoric\":\n penalty = np.amax(np.linalg.norm(std, axis=2), axis=0)\n elif self._uncertainty_mode == \"pairwise-diff\":\n next_obses_mean = mean[..., :-1]\n next_obs_mean = np.mean(next_obses_mean, axis=0)\n diff = next_obses_mean - next_obs_mean\n penalty = np.amax(np.linalg.norm(diff, axis=2), axis=0)\n elif self._uncertainty_mode == \"ensemble_std\":\n next_obses_mean = mean[..., :-1]\n penalty = np.sqrt(next_obses_mean.var(0).mean(1))\n else:\n raise ValueError\n penalty = np.expand_dims(penalty, 1).astype(np.float32)\n assert penalty.shape == reward.shape\n reward = reward - self._penalty_coef * penalty\n info[\"penalty\"] = penalty\n \n return next_obs, reward, terminal, info\n \n @ torch.no_grad()\n def sample_next_obss(\n self,\n obs: torch.Tensor,\n action: torch.Tensor,\n num_samples: int\n ) -> torch.Tensor:\n obs_act = torch.cat([obs, action], dim=-1)\n obs_act = self.scaler.transform_tensor(obs_act)\n mean, logvar = self.model(obs_act)\n mean[..., :-1] += obs\n std = torch.sqrt(torch.exp(logvar))\n\n mean = mean[self.model.elites.data.cpu().numpy()]\n std = std[self.model.elites.data.cpu().numpy()]\n\n samples = torch.stack([mean + torch.randn_like(std) * std for i in range(num_samples)], 0)\n next_obss = samples[..., :-1]\n return next_obss\n\n def format_samples_for_training(self, data: Dict) -> Tuple[np.ndarray, np.ndarray]:\n obss = data[\"observations\"]\n actions = data[\"actions\"]\n next_obss = data[\"next_observations\"]\n rewards = data[\"rewards\"]\n rewards = rewards.reshape(rewards.shape[0], -1)\n delta_obss = next_obss - obss\n inputs = np.concatenate((obss, actions), axis=-1)\n targets = np.concatenate((delta_obss, rewards), axis=-1)\n return inputs, targets\n\n def train(\n self,\n data: Dict,\n logger: Logger,\n max_epochs: Optional[float] = None,\n max_epochs_since_update: int = 5,\n batch_size: int = 256,\n holdout_ratio: float = 0.2,\n logvar_loss_coef: float = 0.01\n ) -> None:\n inputs, targets = self.format_samples_for_training(data)\n data_size = inputs.shape[0]\n holdout_size = min(int(data_size * holdout_ratio), 1000)\n train_size = data_size - holdout_size\n train_splits, holdout_splits = torch.utils.data.random_split(range(data_size), (train_size, holdout_size))\n train_inputs, train_targets = inputs[train_splits.indices], targets[train_splits.indices]\n holdout_inputs, holdout_targets = inputs[holdout_splits.indices], targets[holdout_splits.indices]\n\n self.scaler.fit(train_inputs)\n train_inputs = self.scaler.transform(train_inputs)\n holdout_inputs = self.scaler.transform(holdout_inputs)\n holdout_losses = [1e10 for i in range(self.model.num_ensemble)]\n\n data_idxes = np.random.randint(train_size, size=[self.model.num_ensemble, train_size])\n def shuffle_rows(arr):\n idxes = 
np.argsort(np.random.uniform(size=arr.shape), axis=-1)\n return arr[np.arange(arr.shape[0])[:, None], idxes]\n\n epoch = 0\n cnt = 0\n logger.log(\"Training dynamics:\")\n while True:\n epoch += 1\n train_loss = self.learn(train_inputs[data_idxes], train_targets[data_idxes], batch_size, logvar_loss_coef)\n new_holdout_losses = self.validate(holdout_inputs, holdout_targets)\n holdout_loss = (np.sort(new_holdout_losses)[:self.model.num_elites]).mean()\n logger.logkv(\"loss/dynamics_train_loss\", train_loss)\n logger.logkv(\"loss/dynamics_holdout_loss\", holdout_loss)\n logger.set_timestep(epoch)\n logger.dumpkvs(exclude=[\"policy_training_progress\"])\n\n # shuffle data for each base learner\n data_idxes = shuffle_rows(data_idxes)\n\n indexes = []\n for i, new_loss, old_loss in zip(range(len(holdout_losses)), new_holdout_losses, holdout_losses):\n improvement = (old_loss - new_loss) / old_loss\n if improvement > 0.01:\n indexes.append(i)\n holdout_losses[i] = new_loss\n \n if len(indexes) > 0:\n self.model.update_save(indexes)\n cnt = 0\n else:\n cnt += 1\n \n if (cnt >= max_epochs_since_update) or (max_epochs and (epoch >= max_epochs)):\n break\n\n indexes = self.select_elites(holdout_losses)\n self.model.set_elites(indexes)\n self.model.load_save()\n self.save(logger.model_dir)\n self.model.eval()\n logger.log(\"elites:{} , holdout loss: {}\".format(indexes, (np.sort(holdout_losses)[:self.model.num_elites]).mean()))\n \n def learn(\n self,\n inputs: np.ndarray,\n targets: np.ndarray,\n batch_size: int = 256,\n logvar_loss_coef: float = 0.01\n ) -> float:\n self.model.train()\n train_size = inputs.shape[1]\n losses = []\n\n for batch_num in range(int(np.ceil(train_size / batch_size))):\n inputs_batch = inputs[:, batch_num * batch_size:(batch_num + 1) * batch_size]\n targets_batch = targets[:, batch_num * batch_size:(batch_num + 1) * batch_size]\n targets_batch = torch.as_tensor(targets_batch).to(self.model.device)\n \n mean, logvar = self.model(inputs_batch)\n inv_var = torch.exp(-logvar)\n # Average over batch and dim, sum over ensembles.\n mse_loss_inv = (torch.pow(mean - targets_batch, 2) * inv_var).mean(dim=(1, 2)) # MLE for Gaussian\n var_loss = logvar.mean(dim=(1, 2))\n loss = mse_loss_inv.sum() + var_loss.sum()\n loss = loss + self.model.get_decay_loss()\n loss = loss + logvar_loss_coef * self.model.max_logvar.sum() - logvar_loss_coef * self.model.min_logvar.sum()\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n losses.append(loss.item())\n return np.mean(losses)\n \n @ torch.no_grad()\n def validate(self, inputs: np.ndarray, targets: np.ndarray) -> List[float]:\n self.model.eval()\n targets = torch.as_tensor(targets).to(self.model.device)\n mean, _ = self.model(inputs)\n loss = ((mean - targets) ** 2).mean(dim=(1, 2))\n val_loss = list(loss.cpu().numpy())\n return val_loss\n \n def select_elites(self, metrics: List) -> List[int]:\n pairs = [(metric, index) for metric, index in zip(metrics, range(len(metrics)))]\n pairs = sorted(pairs, key=lambda x: x[0])\n elites = [pairs[i][1] for i in range(self.model.num_elites)]\n return elites\n\n def save(self, save_path: str) -> None:\n torch.save(self.model.state_dict(), os.path.join(save_path, \"dynamics.pth\"))\n self.scaler.save_scaler(save_path)\n \n def load(self, load_path: str) -> None:\n self.model.load_state_dict(torch.load(os.path.join(load_path, \"dynamics.pth\"), map_location=self.model.device))\n self.scaler.load_scaler(load_path)" }, { "identifier": "StandardScaler", "path": "offlinerlkit/utils/scaler.py", 
"snippet": "class StandardScaler(object):\n def __init__(self, mu=None, std=None):\n self.mu = mu\n self.std = std\n\n def fit(self, data):\n \"\"\"Runs two ops, one for assigning the mean of the data to the internal mean, and\n another for assigning the standard deviation of the data to the internal standard deviation.\n This function must be called within a 'with <session>.as_default()' block.\n\n Arguments:\n data (np.ndarray): A numpy array containing the input\n\n Returns: None.\n \"\"\"\n self.mu = np.mean(data, axis=0, keepdims=True)\n self.std = np.std(data, axis=0, keepdims=True)\n self.std[self.std < 1e-12] = 1.0\n\n def transform(self, data):\n \"\"\"Transforms the input matrix data using the parameters of this scaler.\n\n Arguments:\n data (np.array): A numpy array containing the points to be transformed.\n\n Returns: (np.array) The transformed dataset.\n \"\"\"\n return (data - self.mu) / self.std\n\n def inverse_transform(self, data):\n \"\"\"Undoes the transformation performed by this scaler.\n\n Arguments:\n data (np.array): A numpy array containing the points to be transformed.\n\n Returns: (np.array) The transformed dataset.\n \"\"\"\n return self.std * data + self.mu\n \n def save_scaler(self, save_path):\n mu_path = path.join(save_path, \"mu.npy\")\n std_path = path.join(save_path, \"std.npy\")\n np.save(mu_path, self.mu)\n np.save(std_path, self.std)\n \n def load_scaler(self, load_path):\n mu_path = path.join(load_path, \"mu.npy\")\n std_path = path.join(load_path, \"std.npy\")\n self.mu = np.load(mu_path)\n self.std = np.load(std_path)\n\n def transform_tensor(self, data: torch.Tensor):\n device = data.device\n data = self.transform(data.cpu().numpy())\n data = torch.tensor(data, device=device)\n return data" }, { "identifier": "termination_fn_default", "path": "offlinerlkit/utils/termination_fns.py", "snippet": "def termination_fn_default(obs, act, next_obs):\n '''\n Return np.ndarray (obs.shape[0], 1)\n '''\n done = np.array([False] * obs.shape[0])\n done = done[:, None]\n return done" }, { "identifier": "ReplayBuffer", "path": "offlinerlkit/buffer/buffer.py", "snippet": "class ReplayBuffer:\n def __init__(\n self,\n buffer_size: int,\n obs_shape: Tuple,\n obs_dtype: np.dtype,\n action_dim: int,\n action_dtype: np.dtype,\n device: str = \"cpu\"\n ) -> None:\n self._max_size = buffer_size\n self.obs_shape = obs_shape\n self.obs_dtype = obs_dtype\n self.action_dim = action_dim\n self.action_dtype = action_dtype\n\n self._ptr = 0\n self._size = 0\n\n self.observations = np.zeros((self._max_size,) + self.obs_shape, dtype=obs_dtype)\n self.next_observations = np.zeros((self._max_size,) + self.obs_shape, dtype=obs_dtype)\n self.actions = np.zeros((self._max_size, self.action_dim), dtype=action_dtype)\n self.rewards = np.zeros((self._max_size, 1), dtype=np.float32)\n self.terminals = np.zeros((self._max_size, 1), dtype=np.float32)\n\n self.device = torch.device(device)\n\n def add(\n self,\n obs: np.ndarray,\n next_obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n terminal: np.ndarray\n ) -> None:\n # Copy to avoid modification by reference\n self.observations[self._ptr] = np.array(obs).copy()\n self.next_observations[self._ptr] = np.array(next_obs).copy()\n self.actions[self._ptr] = np.array(action).copy()\n self.rewards[self._ptr] = np.array(reward).copy()\n self.terminals[self._ptr] = np.array(terminal).copy()\n\n self._ptr = (self._ptr + 1) % self._max_size\n self._size = min(self._size + 1, self._max_size)\n \n def add_batch(\n self,\n obss: 
np.ndarray,\n next_obss: np.ndarray,\n actions: np.ndarray,\n rewards: np.ndarray,\n terminals: np.ndarray\n ) -> None:\n batch_size = len(obss)\n indexes = np.arange(self._ptr, self._ptr + batch_size) % self._max_size\n\n self.observations[indexes] = np.array(obss).copy()\n self.next_observations[indexes] = np.array(next_obss).copy()\n self.actions[indexes] = np.array(actions).copy()\n self.rewards[indexes] = np.array(rewards).copy()\n self.terminals[indexes] = np.array(terminals).copy()\n\n self._ptr = (self._ptr + batch_size) % self._max_size\n self._size = min(self._size + batch_size, self._max_size)\n \n def load_dataset(self, dataset: Dict[str, np.ndarray]) -> None:\n observations = np.array(dataset[\"observations\"], dtype=self.obs_dtype)\n next_observations = np.array(dataset[\"next_observations\"], dtype=self.obs_dtype)\n actions = np.array(dataset[\"actions\"], dtype=self.action_dtype)\n rewards = np.array(dataset[\"rewards\"], dtype=np.float32).reshape(-1, 1)\n terminals = np.array(dataset[\"terminals\"], dtype=np.float32).reshape(-1, 1)\n\n self.observations = observations\n self.next_observations = next_observations\n self.actions = actions\n self.rewards = rewards\n self.terminals = terminals\n\n self._ptr = len(observations)\n self._size = len(observations)\n \n def normalize_obs(self, eps: float = 1e-3) -> Tuple[np.ndarray, np.ndarray]:\n mean = self.observations.mean(0, keepdims=True)\n std = self.observations.std(0, keepdims=True) + eps\n self.observations = (self.observations - mean) / std\n self.next_observations = (self.next_observations - mean) / std\n obs_mean, obs_std = mean, std\n return obs_mean, obs_std\n\n def sample(self, batch_size: int) -> Dict[str, torch.Tensor]:\n\n batch_indexes = np.random.randint(0, self._size, size=batch_size)\n \n return {\n \"observations\": torch.tensor(self.observations[batch_indexes]).to(self.device),\n \"actions\": torch.tensor(self.actions[batch_indexes]).to(self.device),\n \"next_observations\": torch.tensor(self.next_observations[batch_indexes]).to(self.device),\n \"terminals\": torch.tensor(self.terminals[batch_indexes]).to(self.device),\n \"rewards\": torch.tensor(self.rewards[batch_indexes]).to(self.device)\n }\n \n def sample_all(self) -> Dict[str, np.ndarray]:\n return {\n \"observations\": self.observations[:self._size].copy(),\n \"actions\": self.actions[:self._size].copy(),\n \"next_observations\": self.next_observations[:self._size].copy(),\n \"terminals\": self.terminals[:self._size].copy(),\n \"rewards\": self.rewards[:self._size].copy()\n }" }, { "identifier": "Logger", "path": "offlinerlkit/utils/logger.py", "snippet": "class Logger(object):\n def __init__(self, dir: str, ouput_config: Dict) -> None:\n self._dir = dir\n self._init_dirs()\n self._init_ouput_handlers(ouput_config)\n self._name2val = defaultdict(float)\n self._name2cnt = defaultdict(int)\n self._level = INFO\n self._timestep = 0\n \n def _init_dirs(self) -> None:\n self._record_dir = os.path.join(self._dir, \"record\")\n self._checkpoint_dir = os.path.join(self._dir, \"checkpoint\")\n self._model_dir = os.path.join(self._dir, \"model\")\n self._result_dir = os.path.join(self._dir, \"result\")\n os.mkdir(self._record_dir)\n os.mkdir(self._checkpoint_dir)\n os.mkdir(self._model_dir)\n os.mkdir(self._result_dir)\n \n def _init_ouput_handlers(self, output_config: Dict) -> None:\n self._output_handlers = []\n for file_name, fmt in output_config.items():\n try:\n self._output_handlers.append(HANDLER[fmt](os.path.join(self._record_dir, file_name)))\n except 
KeyError:\n warnings.warn(\"Invalid output type, Valid types: stdout, csv, tensorboard\", DeprecationWarning)\n # default output to console\n self._output_handlers.append(StandardOutputHandler(sys.stdout))\n \n def log_hyperparameters(self, hyper_param: Dict) -> None:\n json_output_handler = JSONOutputHandler(os.path.join(self._record_dir, \"hyper_param\"))\n json_output_handler.writekvs(hyper_param)\n json_output_handler.close()\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.add_hyper_params_to_tb(hyper_param)\n\n def logkv(self, key: Any, val: Any) -> None:\n \"\"\"\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n If called many times, last value will be used.\n \"\"\"\n self._name2val[key] = val\n\n def logkv_mean(self, key: Any, val: Number) -> None:\n \"\"\"\n The same as logkv(), but if called many times, values averaged.\n \"\"\"\n oldval, cnt = self._name2val[key], self._name2cnt[key]\n self._name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)\n self._name2cnt[key] = cnt + 1\n\n def dumpkvs(self, exclude:Optional[Union[str, Tuple[str, ...]]]=None) -> None:\n # log timestep\n self.logkv(DEFAULT_X_NAME, self._timestep)\n for handler in self._output_handlers:\n if isinstance(handler, KVWriter):\n if exclude is not None and handler.handler_name in exclude:\n continue\n handler.writekvs(self._name2val)\n self._name2val.clear()\n self._name2cnt.clear()\n\n def log(self, s: str, level=INFO) -> None:\n for handler in self._output_handlers:\n if isinstance(handler, StandardOutputHandler):\n handler.writestr(s)\n \n def set_timestep(self, timestep: int) -> None:\n self._timestep = timestep\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.set_step(timestep)\n\n def set_level(self, level) -> None:\n self._level = level\n\n @property\n def record_dir(self) -> str:\n return self._record_dir\n \n @property\n def checkpoint_dir(self) -> str:\n return self._checkpoint_dir\n\n @property\n def model_dir(self) -> str:\n return self._model_dir\n \n @property\n def result_dir(self) -> str:\n return self._result_dir\n \n def close(self) -> None:\n for handler in self._output_handlers:\n handler.close()" }, { "identifier": "make_log_dirs", "path": "offlinerlkit/utils/logger.py", "snippet": "def make_log_dirs(\n task_name: str,\n algo_name: str,\n exp_name: str,\n args: Dict,\n part: Optional[str] = None,\n record_params: Optional[List]=None\n) -> str:\n if record_params is not None:\n for param_name in record_params:\n algo_name += f\"&{param_name}={args[param_name]}\"\n\n if part is not None:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name, part)\n else:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name)\n os.makedirs(log_dirs)\n return log_dirs" }, { "identifier": "MBPolicyTrainer", "path": "offlinerlkit/policy_trainer/mb_policy_trainer.py", "snippet": "class MBPolicyTrainer:\n def __init__(\n self,\n policy: BasePolicy,\n eval_env: Union[gym.Env, gymnasium.Env],\n real_buffer: ReplayBuffer,\n fake_buffer: ReplayBuffer,\n logger: Logger,\n rollout_setting: Tuple[int, int, int],\n epoch: int = 1000,\n step_per_epoch: int = 1000,\n batch_size: int = 256,\n real_ratio: float = 0.05,\n eval_episodes: int = 10,\n lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n dynamics_update_freq: int = 0,\n horizon: Optional[int] = None,\n has_terminal = False,\n binary_ret = False\n ) -> None:\n self.policy = 
policy\n self.eval_env = eval_env\n self.horizon = horizon\n self.real_buffer = real_buffer\n self.fake_buffer = fake_buffer\n self.logger = logger\n\n self._rollout_freq, self._rollout_batch_size, \\\n self._rollout_length = rollout_setting\n self._dynamics_update_freq = dynamics_update_freq\n\n self._epoch = epoch\n self._step_per_epoch = step_per_epoch\n self._batch_size = batch_size\n self._real_ratio = real_ratio\n self._eval_episodes = eval_episodes\n self.lr_scheduler = lr_scheduler\n\n self.is_gymnasium_env = hasattr(self.eval_env, \"get_true_observation\")\n assert (not self.is_gymnasium_env) or (self.horizon is not None), \"Horizon must be specified for Gymnasium env\"\n self.has_terminal = has_terminal\n self.binary_ret = binary_ret\n\n def train(self, last_eval = False) -> Dict[str, float]:\n start_time = time.time()\n\n num_timesteps = 0\n last_10_performance = deque(maxlen=10)\n # train loop\n for e in range(1, self._epoch + 1):\n\n self.policy.train()\n\n pbar = tqdm(range(self._step_per_epoch), desc=f\"Epoch #{e}/{self._epoch}\")\n for it in pbar:\n if num_timesteps % self._rollout_freq == 0: # rollout periodically\n init_obss = self.real_buffer.sample(self._rollout_batch_size)[\"observations\"].cpu().numpy()\n rollout_transitions, rollout_info = self.policy.rollout(init_obss, self._rollout_length)\n self.fake_buffer.add_batch(**rollout_transitions)\n self.logger.log(\n \"num rollout transitions: {}, reward mean: {:.4f}\".\\\n format(rollout_info[\"num_transitions\"], rollout_info[\"reward_mean\"])\n )\n for _key, _value in rollout_info.items():\n self.logger.logkv_mean(\"rollout_info/\"+_key, _value)\n\n # Sample from both real (offline data) and fake (rollout data) according to real_ratio\n real_sample_size = int(self._batch_size * self._real_ratio)\n fake_sample_size = self._batch_size - real_sample_size\n real_batch = self.real_buffer.sample(batch_size=real_sample_size)\n fake_batch = self.fake_buffer.sample(batch_size=fake_sample_size)\n batch = {\"real\": real_batch, \"fake\": fake_batch}\n loss = self.policy.learn(batch)\n pbar.set_postfix(**loss)\n\n for k, v in loss.items():\n self.logger.logkv_mean(k, v)\n \n # update the dynamics if necessary\n if 0 < self._dynamics_update_freq and (num_timesteps+1)%self._dynamics_update_freq == 0:\n dynamics_update_info = self.policy.update_dynamics(self.real_buffer)\n for k, v in dynamics_update_info.items():\n self.logger.logkv_mean(k, v)\n \n num_timesteps += 1\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n \n if last_eval and e < self._epoch: # When last_eval is True, only evaluate on last epoch\n pass\n else:\n # evaluate current policy\n eval_info = self._evaluate()\n ep_reward_mean, ep_reward_std = np.mean(eval_info[\"eval/episode_reward\"]), np.std(eval_info[\"eval/episode_reward\"])\n ep_length_mean, ep_length_std = np.mean(eval_info[\"eval/episode_length\"]), np.std(eval_info[\"eval/episode_length\"])\n\n if not hasattr(self.eval_env, \"get_normalized_score\"): # gymnasium_env does not have normalized score\n last_10_performance.append(ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward\", ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward_std\", ep_reward_std) \n else: \n norm_ep_rew_mean = self.eval_env.get_normalized_score(ep_reward_mean) * 100\n norm_ep_rew_std = self.eval_env.get_normalized_score(ep_reward_std) * 100\n last_10_performance.append(norm_ep_rew_mean)\n self.logger.logkv(\"eval/normalized_episode_reward\", norm_ep_rew_mean)\n 
self.logger.logkv(\"eval/normalized_episode_reward_std\", norm_ep_rew_std)\n self.logger.logkv(\"eval/episode_length\", ep_length_mean)\n self.logger.logkv(\"eval/episode_length_std\", ep_length_std)\n self.logger.set_timestep(num_timesteps)\n self.logger.dumpkvs(exclude=[\"dynamics_training_progress\"])\n \n # save checkpoint\n torch.save(self.policy.state_dict(), os.path.join(self.logger.checkpoint_dir, \"policy.pth\"))\n\n self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n torch.save(self.policy.state_dict(), os.path.join(self.logger.model_dir, \"policy.pth\"))\n self.policy.dynamics.save(self.logger.model_dir)\n self.logger.close()\n \n return {\"last_10_performance\": np.mean(last_10_performance)}\n\n def _evaluate(self) -> Dict[str, List[float]]:\n is_gymnasium_env = self.is_gymnasium_env\n \n self.policy.eval()\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n \n\n eval_ep_info_buffer = []\n num_episodes = 0\n episode_reward, episode_length = 0, 0\n\n if not self.has_terminal: # Finite horizon, terminal is unimportant\n while num_episodes < self._eval_episodes:\n for timestep in range(self.horizon): # One epoch\n # print(f\"Timestep {timestep}, obs {obs}\")\n action = self.policy.select_action(obs.reshape(1, -1), deterministic=True)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, _ = self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n episode_length += 1\n\n obs = next_obs\n\n if self.binary_ret:\n episode_reward = 1 if episode_reward >= 1 else 0\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n num_episodes +=1\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n else:\n while num_episodes < self._eval_episodes:\n action = self.policy.select_action(obs.reshape(1, -1), deterministic=True)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, _ = self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n episode_length += 1\n\n obs = next_obs\n\n if terminal: # Episode finishes\n if self.binary_ret:\n episode_reward = 1 if episode_reward >= 1 else 0\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n num_episodes +=1\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n \n return {\n \"eval/episode_reward\": [ep_info[\"episode_reward\"] for ep_info in eval_ep_info_buffer],\n \"eval/episode_length\": [ep_info[\"episode_length\"] for ep_info in eval_ep_info_buffer]\n }" }, { "identifier": "COMBOPolicy", "path": "offlinerlkit/policy/model_based/combo.py", "snippet": "class COMBOPolicy(CQLPolicy):\n \"\"\"\n Conservative Offline Model-Based Policy Optimization <Ref: https://arxiv.org/abs/2102.08363>\n \"\"\"\n\n def __init__(\n self,\n dynamics: 
BaseDynamics,\n actor: nn.Module,\n critic1: nn.Module,\n critic2: nn.Module,\n actor_optim: torch.optim.Optimizer,\n critic1_optim: torch.optim.Optimizer,\n critic2_optim: torch.optim.Optimizer,\n action_space: gym.spaces.Space,\n tau: float = 0.005,\n gamma: float = 0.99,\n alpha: Union[float, Tuple[float, torch.Tensor, torch.optim.Optimizer]] = 0.2,\n cql_weight: float = 1.0,\n temperature: float = 1.0,\n max_q_backup: bool = False,\n deterministic_backup: bool = True,\n with_lagrange: bool = True,\n lagrange_threshold: float = 10.0,\n cql_alpha_lr: float = 1e-4,\n num_repeart_actions:int = 10,\n uniform_rollout: bool = False,\n rho_s: str = \"mix\"\n ) -> None:\n super().__init__(\n actor,\n critic1,\n critic2,\n actor_optim,\n critic1_optim,\n critic2_optim,\n action_space,\n tau=tau,\n gamma=gamma,\n alpha=alpha,\n cql_weight=cql_weight,\n temperature=temperature,\n max_q_backup=max_q_backup,\n deterministic_backup=deterministic_backup,\n with_lagrange=with_lagrange,\n lagrange_threshold=lagrange_threshold,\n cql_alpha_lr=cql_alpha_lr,\n num_repeart_actions=num_repeart_actions\n )\n\n self.dynamics = dynamics\n self._uniform_rollout = uniform_rollout\n self._rho_s = rho_s\n\n def rollout(\n self,\n init_obss: np.ndarray,\n rollout_length: int\n ) -> Tuple[Dict[str, np.ndarray], Dict]:\n\n num_transitions = 0\n rewards_arr = np.array([])\n rollout_transitions = defaultdict(list)\n\n # rollout\n observations = init_obss\n for _ in range(rollout_length):\n if self._uniform_rollout:\n actions = np.random.uniform(\n self.action_space.low[0],\n self.action_space.high[0],\n size=(len(observations), self.action_space.shape[0])\n )\n else:\n actions = self.select_action(observations)\n next_observations, rewards, terminals, info = self.dynamics.step(observations, actions)\n rollout_transitions[\"obss\"].append(observations)\n rollout_transitions[\"next_obss\"].append(next_observations)\n rollout_transitions[\"actions\"].append(actions)\n rollout_transitions[\"rewards\"].append(rewards)\n rollout_transitions[\"terminals\"].append(terminals)\n\n num_transitions += len(observations)\n rewards_arr = np.append(rewards_arr, rewards.flatten())\n\n nonterm_mask = (~terminals).flatten()\n if nonterm_mask.sum() == 0:\n break\n\n observations = next_observations[nonterm_mask]\n \n for k, v in rollout_transitions.items():\n rollout_transitions[k] = np.concatenate(v, axis=0)\n\n return rollout_transitions, \\\n {\"num_transitions\": num_transitions, \"reward_mean\": rewards_arr.mean()}\n \n def learn(self, batch: Dict) -> Dict[str, float]:\n real_batch, fake_batch = batch[\"real\"], batch[\"fake\"]\n # Mix data from real (offline) and fake (rollout)\n mix_batch = {k: torch.cat([real_batch[k], fake_batch[k]], 0) for k in real_batch.keys()}\n\n obss, actions, next_obss, rewards, terminals = mix_batch[\"observations\"], mix_batch[\"actions\"], \\\n mix_batch[\"next_observations\"], mix_batch[\"rewards\"], mix_batch[\"terminals\"]\n batch_size = obss.shape[0]\n \n # update actor\n a, log_probs = self.actforward(obss)\n q1a, q2a = self.critic1(obss, a), self.critic2(obss, a)\n actor_loss = (self._alpha * log_probs - torch.min(q1a, q2a)).mean()\n self.actor_optim.zero_grad()\n actor_loss.backward()\n self.actor_optim.step()\n\n if self._is_auto_alpha:\n log_probs = log_probs.detach() + self._target_entropy\n alpha_loss = -(self._log_alpha * log_probs).mean()\n self.alpha_optim.zero_grad()\n alpha_loss.backward()\n self.alpha_optim.step()\n self._alpha = self._log_alpha.detach().exp()\n \n # compute td error\n 
if self._max_q_backup:\n with torch.no_grad():\n tmp_next_obss = next_obss.unsqueeze(1) \\\n .repeat(1, self._num_repeat_actions, 1) \\\n .view(batch_size * self._num_repeat_actions, next_obss.shape[-1])\n tmp_next_actions, _ = self.actforward(tmp_next_obss)\n tmp_next_q1 = self.critic1_old(tmp_next_obss, tmp_next_actions) \\\n .view(batch_size, self._num_repeat_actions, 1) \\\n .max(1)[0].view(-1, 1)\n tmp_next_q2 = self.critic2_old(tmp_next_obss, tmp_next_actions) \\\n .view(batch_size, self._num_repeat_actions, 1) \\\n .max(1)[0].view(-1, 1)\n next_q = torch.min(tmp_next_q1, tmp_next_q2)\n else:\n with torch.no_grad():\n next_actions, next_log_probs = self.actforward(next_obss)\n next_q = torch.min(\n self.critic1_old(next_obss, next_actions),\n self.critic2_old(next_obss, next_actions)\n )\n if not self._deterministic_backup:\n next_q -= self._alpha * next_log_probs\n\n target_q = rewards + self._gamma * (1 - terminals) * next_q\n q1, q2 = self.critic1(obss, actions), self.critic2(obss, actions)\n critic1_loss = ((q1 - target_q).pow(2)).mean()\n critic2_loss = ((q2 - target_q).pow(2)).mean()\n\n # compute conservative loss\n if self._rho_s == \"model\":\n obss, actions, next_obss = fake_batch[\"observations\"], \\\n fake_batch[\"actions\"], fake_batch[\"next_observations\"]\n \n batch_size = len(obss)\n random_actions = torch.FloatTensor(\n batch_size * self._num_repeat_actions, actions.shape[-1]\n ).uniform_(self.action_space.low[0], self.action_space.high[0]).to(self.actor.device)\n # tmp_obss & tmp_next_obss: (batch_size * num_repeat, obs_dim)\n tmp_obss = obss.unsqueeze(1) \\\n .repeat(1, self._num_repeat_actions, 1) \\\n .view(batch_size * self._num_repeat_actions, obss.shape[-1])\n tmp_next_obss = next_obss.unsqueeze(1) \\\n .repeat(1, self._num_repeat_actions, 1) \\\n .view(batch_size * self._num_repeat_actions, obss.shape[-1])\n \n obs_pi_value1, obs_pi_value2 = self.calc_pi_values(tmp_obss, tmp_obss)\n next_obs_pi_value1, next_obs_pi_value2 = self.calc_pi_values(tmp_next_obss, tmp_obss)\n random_value1, random_value2 = self.calc_random_values(tmp_obss, random_actions)\n\n for value in [\n obs_pi_value1, obs_pi_value2, next_obs_pi_value1, next_obs_pi_value2,\n random_value1, random_value2\n ]:\n value.reshape(batch_size, self._num_repeat_actions, 1)\n \n # cat_q shape: (batch_size, 3 * num_repeat, 1)\n cat_q1 = torch.cat([obs_pi_value1, next_obs_pi_value1, random_value1], 1)\n cat_q2 = torch.cat([obs_pi_value2, next_obs_pi_value2, random_value2], 1)\n # Samples from the original dataset\n real_obss, real_actions = real_batch['observations'], real_batch['actions']\n q1, q2 = self.critic1(real_obss, real_actions), self.critic2(real_obss, real_actions)\n\n conservative_loss1 = \\\n torch.logsumexp(cat_q1 / self._temperature, dim=1).mean() * self._cql_weight * self._temperature - \\\n q1.mean() * self._cql_weight\n conservative_loss2 = \\\n torch.logsumexp(cat_q2 / self._temperature, dim=1).mean() * self._cql_weight * self._temperature - \\\n q2.mean() * self._cql_weight\n \n if self._with_lagrange:\n cql_alpha = torch.clamp(self.cql_log_alpha.exp(), 0.0, 1e6)\n conservative_loss1 = cql_alpha * (conservative_loss1 - self._lagrange_threshold)\n conservative_loss2 = cql_alpha * (conservative_loss2 - self._lagrange_threshold)\n\n self.cql_alpha_optim.zero_grad()\n cql_alpha_loss = -(conservative_loss1 + conservative_loss2) * 0.5\n cql_alpha_loss.backward(retain_graph=True)\n self.cql_alpha_optim.step()\n \n critic1_loss = critic1_loss + conservative_loss1\n critic2_loss = critic2_loss 
+ conservative_loss2\n\n # update critic\n self.critic1_optim.zero_grad()\n critic1_loss.backward(retain_graph=True)\n self.critic1_optim.step()\n\n self.critic2_optim.zero_grad()\n critic2_loss.backward()\n self.critic2_optim.step()\n\n self._sync_weight()\n\n result = {\n \"loss/actor\": actor_loss.item(),\n \"loss/critic1\": critic1_loss.item(),\n \"loss/critic2\": critic2_loss.item()\n }\n\n if self._is_auto_alpha:\n result[\"loss/alpha\"] = alpha_loss.item()\n result[\"alpha\"] = self._alpha.item()\n if self._with_lagrange:\n result[\"loss/cql_alpha\"] = cql_alpha_loss.item()\n result[\"cql_alpha\"] = cql_alpha.item()\n \n return result" }, { "identifier": "PickPlaceObsWrapper", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "class PickPlaceObsWrapper(gym.ObservationWrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n\n tmp_true_obs = get_pickplace_obs(tmp_obs)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def observation(self, observation: Dict[str, np.ndarray]) -> np.ndarray:\n return get_pickplace_obs(observation)\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n return self.observation(self.env.reset())" }, { "identifier": "DoubleDrawerObsWrapper", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "class DoubleDrawerObsWrapper(gym.Wrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n info = env.get_info()\n\n tmp_true_obs = get_doubledrawer_obs(tmp_obs, info)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n obs = get_doubledrawer_obs(obs, info)\n return obs, reward, done, info\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n obs = self.env.reset()\n info = self.env.get_info()\n return get_doubledrawer_obs(obs, info)" }, { "identifier": "get_pickplace_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_pickplace_dataset(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. 
Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n init_obss.append(get_pickplace_obs(obs_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n elif key == 'next_observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" }, { "identifier": "get_doubledrawer_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_doubledrawer_dataset(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. 
Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n info_list = d['env_infos']\n init_obss.append(get_doubledrawer_obs(obs_list[0], info_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n info_list = d['env_infos']\n # initial info is similar to step 1\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, [info_list[0]] + info_list[:-1])]\n elif key == 'next_observations':\n info_list = d['env_infos']\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, info_list)]\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" }, { "identifier": "none_or_str", "path": "offlinerlkit/utils/none_or_str.py", "snippet": "def none_or_str(value):\n if value == 'None':\n return None\n return value" } ]
import argparse import os import sys import random import datetime import roboverse import numpy as np import torch from offlinerlkit.nets import MLP from offlinerlkit.modules import ActorProb, Critic, TanhDiagGaussian, EnsembleDynamicsModel from offlinerlkit.dynamics import EnsembleDynamics from offlinerlkit.utils.scaler import StandardScaler from offlinerlkit.utils.termination_fns import termination_fn_default from offlinerlkit.buffer import ReplayBuffer from offlinerlkit.utils.logger import Logger, make_log_dirs from offlinerlkit.policy_trainer import MBPolicyTrainer from offlinerlkit.policy import COMBOPolicy from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset from offlinerlkit.utils.none_or_str import none_or_str
16,115
return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy") task_data_path = os.path.join(args.data_dir, "pickplace_task.npy") dataset, init_obss_dataset = get_pickplace_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawercloseopen': env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device) critic1 = Critic(critic1_backbone, args.device) critic2 = Critic(critic2_backbone, args.device) actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr) critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr) critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(actor_optim, 
args.epoch) if args.auto_alpha: target_entropy = args.target_entropy if args.target_entropy \ else -np.prod(env.action_space.shape) args.target_entropy = target_entropy log_alpha = torch.zeros(1, requires_grad=True, device=args.device) alpha_optim = torch.optim.Adam([log_alpha], lr=args.alpha_lr) alpha = (target_entropy, log_alpha, alpha_optim) else: alpha = args.alpha # create dynamics load_dynamics_model = True if args.load_dynamics_path else False dynamics_model = EnsembleDynamicsModel( obs_dim=np.prod(args.obs_shape), action_dim=args.action_dim, hidden_dims=args.dynamics_hidden_dims, num_ensemble=args.n_ensemble, num_elites=args.n_elites, weight_decays=args.dynamics_weight_decay, device=args.device ) dynamics_optim = torch.optim.Adam( dynamics_model.parameters(), lr=args.dynamics_lr )
def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--algo-name", type=str, default="combo") parser.add_argument("--task", type=str, default="pickplace", help="pickplace") # Self-constructed environment parser.add_argument("--last_eval", action="store_false") # env config (pickplace) parser.add_argument('--data_dir', type=str, required=True) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") parser.add_argument("--seed", type=int, default=0) parser.add_argument("--actor-lr", type=float, default=1e-4) parser.add_argument("--critic-lr", type=float, default=3e-4) parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256]) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--tau", type=float, default=0.005) parser.add_argument("--alpha", type=float, default=0.2) parser.add_argument("--auto-alpha", default=True) parser.add_argument("--target-entropy", type=int, default=None) parser.add_argument("--alpha-lr", type=float, default=1e-4) parser.add_argument("--cql-weight", type=float, default=1.0) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--max-q-backup", type=bool, default=False) parser.add_argument("--deterministic-backup", type=bool, default=True) parser.add_argument("--with-lagrange", type=bool, default=False) parser.add_argument("--lagrange-threshold", type=float, default=10.0) parser.add_argument("--cql-alpha-lr", type=float, default=3e-4) parser.add_argument("--num-repeat-actions", type=int, default=10) parser.add_argument("--uniform-rollout", type=bool, default=False) parser.add_argument("--rho-s", type=str, default="mix", choices=["model", "mix"]) parser.add_argument("--dynamics-lr", type=float, default=1e-3) parser.add_argument("--dynamics-hidden-dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--dynamics-weight-decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4]) parser.add_argument("--n-ensemble", type=int, default=7) parser.add_argument("--n-elites", type=int, default=5) parser.add_argument("--rollout-freq", type=int, default=1000) parser.add_argument("--rollout-batch-size", type=int, default=50000) parser.add_argument("--rollout-length", type=int, default=5) parser.add_argument("--model-retain-epochs", type=int, default=5) parser.add_argument("--real-ratio", type=float, default=0.5) parser.add_argument("--load-dynamics-path", type=none_or_str, default=None) parser.add_argument("--epoch", type=int, default=200) parser.add_argument("--step-per-epoch", type=int, default=1000) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy") task_data_path = os.path.join(args.data_dir, "pickplace_task.npy") dataset, init_obss_dataset = 
get_pickplace_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawercloseopen': env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device) critic1 = Critic(critic1_backbone, args.device) critic2 = Critic(critic2_backbone, args.device) actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr) critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr) critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(actor_optim, args.epoch) if args.auto_alpha: target_entropy = args.target_entropy if args.target_entropy \ else -np.prod(env.action_space.shape) args.target_entropy = target_entropy log_alpha = torch.zeros(1, requires_grad=True, device=args.device) alpha_optim = torch.optim.Adam([log_alpha], lr=args.alpha_lr) alpha = (target_entropy, log_alpha, alpha_optim) else: alpha = args.alpha # create dynamics load_dynamics_model = True if args.load_dynamics_path else False dynamics_model = EnsembleDynamicsModel( obs_dim=np.prod(args.obs_shape), action_dim=args.action_dim, hidden_dims=args.dynamics_hidden_dims, num_ensemble=args.n_ensemble, num_elites=args.n_elites, weight_decays=args.dynamics_weight_decay, device=args.device ) 
dynamics_optim = torch.optim.Adam( dynamics_model.parameters(), lr=args.dynamics_lr )
scaler = StandardScaler()
6
2023-10-11 08:36:06+00:00
24k
lmb-freiburg/ldce
scripts/ldce.py
[ { "identifier": "disabled_train", "path": "sampling_helpers.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "get_model", "path": "sampling_helpers.py", "snippet": "def get_model(cfg_path=\"configs/latent-diffusion/cin256-v2.yaml\", ckpt_path=\"models/ldm/cin256-v2/model.ckpt\"):\n config = OmegaConf.load(cfg_path)\n model = load_model_from_config(config, ckpt_path)\n return model" }, { "identifier": "_unmap_img", "path": "sampling_helpers.py", "snippet": "def _unmap_img(x, from_image_net_dist=False):\n \"\"\"\n from 0 to 1 to -1 to 1\n \"\"\"\n\n return 2. * x - 1" }, { "identifier": "generate_samples", "path": "sampling_helpers.py", "snippet": "def generate_samples(\n model, \n sampler, \n target_y, \n ddim_steps, \n scale, \n init_image=None, \n t_enc=None,\n init_latent=None, \n ccdddim=False, \n ddim_eta=0., \n latent_t_0=True, \n prompts: list = None,\n seed: int = 0\n):\n torch.cuda.empty_cache()\n \n all_samples = []\n all_probs = []\n all_videos = []\n all_masks = []\n all_cgs = []\n\n with torch.no_grad():\n with model.ema_scope():\n tic = time.time()\n print(f\"rendering target classes '{target_y}' in {len(sampler.ddim_timesteps)} or {ddim_steps} steps and using s={scale:.2f}.\")\n batch_size = target_y.shape[0]\n if \"class_label\" == model.cond_stage_key: # class-conditional\n uc = model.get_learned_conditioning({model.cond_stage_key: torch.tensor(batch_size * [1000]).to(model.device)})\n c = model.get_learned_conditioning({model.cond_stage_key: target_y.to(model.device)})\n elif \"txt\" == model.cond_stage_key: # text-conditional\n uc = model.get_learned_conditioning(batch_size * [\"\"])\n if prompts is None:\n raise ValueError(\"Prompts are not defined!\")\n c = model.get_learned_conditioning(prompts)\n else:\n raise NotImplementedError\n \n if init_latent is not None:\n if seed!=-1:\n noises_per_batch = []\n for b in range(batch_size):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.cuda.manual_seed_all(seed)\n noises_per_batch.append(torch.randn_like(init_latent[b]))\n noise = torch.stack(noises_per_batch, dim=0)\n else:\n noise = None\n z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc] * (batch_size)).to(\n init_latent.device), noise=noise) if not latent_t_0 else init_latent\n\n if seed!=-1:\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n # decode it\n if ccdddim:\n out = sampler.decode(\n z_enc, \n c, \n t_enc, \n unconditional_guidance_scale=scale,\n unconditional_conditioning=uc, \n y=target_y.to(model.device), \n latent_t_0=latent_t_0,\n )\n samples = out[\"x_dec\"]\n prob = out[\"prob\"]\n vid = out[\"video\"]\n mask = out[\"mask\"]\n cg = out[\"concensus_regions\"]\n\n else:\n samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=scale,\n unconditional_conditioning=uc)\n\n x_samples = model.decode_first_stage(samples)\n x_samples_ddim = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)\n cat_samples = x_samples_ddim #torch.cat([init_image[:1], x_samples_ddim], dim=0)\n else:\n\n samples_ddim, _ = sampler.sample(S=ddim_steps,\n conditioning=c,\n batch_size=batch_size,\n shape=[3, 64, 64],\n verbose=False,\n unconditional_guidance_scale=scale,\n unconditional_conditioning=uc,\n eta=ddim_eta)\n\n x_samples_ddim = model.decode_first_stage(samples_ddim)\n x_samples_ddim = 
torch.clamp((x_samples_ddim + 1.0) / 2.0,\n min=0.0, max=1.0)\n cat_samples = x_samples_ddim\n\n all_samples.append(cat_samples)\n all_probs.append(prob) if ccdddim and prob is not None else None\n all_videos.append(vid) if ccdddim and vid is not None else None\n all_masks.append(mask) if ccdddim and mask is not None else None\n all_cgs.append(cg) if ccdddim and cg is not None else None\n tac = time.time()\n\n out = {}\n out[\"samples\"] = all_samples\n out[\"probs\"] = all_probs if len(all_probs) > 0 else None\n out[\"videos\"] = all_videos if len(all_videos) > 0 else None\n out[\"masks\"] = all_masks if len(all_masks) > 0 else None\n out[\"cgs\"] = all_cgs if len(all_cgs) > 0 else None\n \n return out" }, { "identifier": "load_model_hf", "path": "sampling_helpers.py", "snippet": "def load_model_hf(repo_id, filename, dir, ckpt_config_filename, device='cpu'):\n cache_config_file = hf_hub_download(repo_id=repo_id, filename=ckpt_config_filename)\n\n args = SLConfig.fromfile(cache_config_file)\n args.device = device\n model = build_model(args)\n\n cache_file = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=dir)\n checkpoint = torch.load(cache_file, map_location='cpu')\n log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)\n print(\"Model loaded from {} \\n => {}\".format(cache_file, log))\n _ = model.eval()\n return model.to(device)" }, { "identifier": "CCMDDIMSampler", "path": "ldm/models/diffusion/cc_ddim.py", "snippet": "class CCMDDIMSampler(object):\n def __init__(self, model, classifier, model_type=\"latent\", schedule=\"linear\", guidance=\"free\", lp_custom=False,\n deg_cone_projection=10., denoise_dist_input=True, classifier_lambda=1, dist_lambda=0.15,\n enforce_same_norms=True, seg_model=None, detect_model=None, masked_guidance=False,\n backprop_diffusion=True, log_backprop_gradients: bool = False, mask_alpha = 5., cone_projection_type= 'default', self_recurrence=0, classifier_wrapper: bool = True, record_intermediate_results:bool=False, verbose:bool=True,**kwargs):\n\n super().__init__()\n self.model_type = model_type\n self.lp_custom = lp_custom\n self.images = []\n self.probs = []\n self.classifier_lambda = classifier_lambda\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n self.classifier = classifier\n self.guidance = guidance\n self.backprop_diffusion = backprop_diffusion\n self.log_backprop_gradients = log_backprop_gradients\n # self.projected_counterfactuals = projected_counterfactuals\n self.deg_cone_projection = deg_cone_projection\n self.cone_projection_type = cone_projection_type\n self.denoise_dist_input = denoise_dist_input\n self.dist_lambda = dist_lambda\n self.enforce_same_norms = enforce_same_norms\n self.seg_model = seg_model\n self.masked_guidance = masked_guidance\n self.mask_alpha = mask_alpha\n self.self_recurrence = self_recurrence\n self.classifier_wrapper = classifier_wrapper\n self.record_intermediate_results = record_intermediate_results\n self.verbose = verbose\n\n self.init_images = None\n self.init_labels = None \n self.mask = None\n self.concensus_regions = []\n \n self.detect_model = detect_model\n self.classification_criterion = torch.nn.CrossEntropyLoss()\n self.binary_classification_criterion = torch.nn.BCEWithLogitsLoss()\n \n self.dino_pipeline = False\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom:\n self.distance_criterion = DinoLoss(dino=torch.hub.load('facebookresearch/dino:main', 'dino_vitb16').eval(), 
loss_identifier=self.lp_custom.split(\"_\")[-1])\n self.dino_init_features = None\n self.dino_pipeline = True\n elif isinstance(self.lp_custom, int):\n if self.lp_custom == 1:\n self.distance_criterion = torch.nn.L1Loss(reduction='sum')\n elif self.lp_custom == 2:\n self.distance_criterion = torch.nn.MSELoss(reduction='sum')\n else:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n def get_classifier_dist(self, x, t=None):\n \"\"\"\n Create a distribution over the classifier output space\n Args:\n x: input image for which to create the distribution over the classifier output space range [-1, 1]\n\n Returns:\n dist: torch distribution over the classifier output space\n\n \"\"\"\n x = tf.center_crop(x, 224)\n x = normalize(_map_img(x))\n logit = self.classifier(x) # (TODO) add option for t here\n dist = torchd.independent.Independent(OneHotDist(logit, validate_args = False), 0) # 0 here is the batch dimension, so event_shape is (num_classes, )\n return dist\n\n def get_classifier_logits(self, x, t=None):\n \"\"\"\n Returns classifier logits\n Args:\n x: input image for which to create the prediction\n\n Returns:\n logits: logits of output layer of target model\n\n \"\"\"\n x = _map_img(x)\n if not self.classifier_wrapper: # only works for ImageNet!\n x = tf.center_crop(x, 224)\n x = normalize(x)\n return self.classifier(x)\n\n def get_dino_features(self, x, device):\n x = normalize(_map_img(tf.center_crop(x, output_size=224)))\n return self.distance_criterion.dino(x.to(device))\n\n def get_mask_clip_seg(self):\n \"\"\"\n this function returns a negative mask given by a segmentation model for the region of interest\n values are higher outside the region of interest\n \"\"\"\n if self.mask is not None:\n return self.mask\n\n prompts = []\n\n for l in self.init_labels:\n prompts.append(re.sub(r'\\b(\\w)', lambda m: m.group(1).upper(), i2h[l]))\n\n with torch.no_grad():\n img_to_seg = F.interpolate(normalize(self.init_images), size=(352, 352), mode='bilinear',\n align_corners=False).to(self.init_images.device)\n preds = self.seg_model(img_to_seg, prompts)[0]\n preds = F.interpolate(preds, size=self.init_images.shape[-2:], mode='bilinear', align_corners=False)\n preds = torch.sigmoid(preds) # torch.softmax(preds.view(preds.shape[0], -1), dim=1).view(*preds.shape)\n # penalty = 1-preds\n preds = (preds - preds.min()) / (preds.max() - preds.min())\n preds = torch.sigmoid(self.mask_alpha*2*(preds-0.5))\n self.mask = preds.to(self.init_images.device)\n return self.mask\n\n def get_mask(self):\n \"\"\"\n this function returns a negative mask given by a segmentation model for the region of interest\n values are higher outside the region of interest\n \"\"\"\n\n if self.mask is not None:\n return self.mask\n\n with torch.no_grad():\n print(\"input range\", self.init_images.min(), self.init_images.max())\n image_int8 = (self.init_images[0].permute(1, 2, 0).cpu().numpy() * 255.).astype(np.uint8)\n # detected_boxes = detect(image, text_prompt=i2h[label], model=groundingdino_model, image_source=image_image)\n detected_boxes = detect(normalize(self.init_images[0]).squeeze(),\n text_prompt=i2h[self.init_labels[0]].split(',')[0],\n model=self.detect_model) # , image_source=image_int8)\n segmented_frame_masks = segment(image_int8, self.seg_model, boxes=detected_boxes)\n preds = torch.any(segmented_frame_masks, dim=0)\n preds = preds.unsqueeze(0).repeat(self.init_images.shape[0], *(1,) * len(preds.shape))\n # print(\"preds range after first seg \", preds.min(), preds.max())\n self.mask = 
preds.to(self.init_images.device)\n\n return self.mask\n\n def get_output(self, x, t, c, index, unconditional_conditioning, use_original_steps=True, quantize_denoised=True,\n return_decoded=False, return_pred_latent_x0=False):\n b, device = x.shape[0], x.device\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n with torch.enable_grad() if self.backprop_diffusion else torch.no_grad():\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n\n if return_decoded:\n # getting the original denoised image\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n # current prediction for x_0\n # get the original image with range [0, 1] if it is in latent space\n pred_latent_x0 = (x - sqrt_one_minus_at * e_t_uncond) / a_t.sqrt() # e_t - > e_t_uncond\n if quantize_denoised:\n pred_latent_x0, _, *_ = self.model.first_stage_model.quantize(pred_latent_x0)\n\n pred_x0 = self.model.differentiable_decode_first_stage(\n pred_latent_x0) # if self.model_type == \"latent\" else pred_latent_x0\n # pred_x0 = torch.clamp((pred_x0 + 1.0) / 2.0, min=0.0, max=1.0)\n \n if return_pred_latent_x0:\n return e_t_uncond, e_t, pred_x0, pred_latent_x0\n else:\n return e_t_uncond, e_t, pred_x0\n else:\n return e_t_uncond, e_t\n\n def conditional_score(self, x, t, c, index, use_original_steps, quantize_denoised, unconditional_guidance_scale=1.,\n unconditional_conditioning=None, y=None):\n \"\"\"\n\n Args:\n x: input image\n t: time step\n c: conditioning\n index: index for the schedule\n use_original_steps: whether to use the original steps\n quantize_denoised: whether to quantize the denoised image\n unconditional_guidance_scale: scale for the unconditional guidance\n unconditional_conditioning: unconditional conditioning\n y: target class\n\n\n Returns:\n e_t: score after conditioning\n\n \"\"\"\n b, *_, device = *x.shape, x.device\n x = x.detach() # .requires_grad_()\n # x.requires_grad = True\n prob_best_class = None\n mask_guidance = None\n\n ## check if gradient tracking is on for x\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n return e_t\n\n # print(\"check gradient tracking onf e \", e_t.requires_grad)\n if self.guidance == \"free\":\n e_t_uncond, e_t, pred_x0 = self.get_output(x, t, c, index, unconditional_conditioning, use_original_steps,\n quantize_denoised, return_decoded=True)\n\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n return e_t\n\n # print(\"check gradient tracking onf e \", e_t.requires_grad)\n score_out = torch.zeros_like(x)\n\n with torch.enable_grad():\n x_noise = x.detach().requires_grad_()\n ret_vals = self.get_output(x_noise, t, c, index, unconditional_conditioning,\n use_original_steps, quantize_denoised=quantize_denoised,\n return_decoded=True, return_pred_latent_x0=self.log_backprop_gradients)\n if self.log_backprop_gradients:\n e_t_uncond, e_t, pred_x0, pred_latent_x0 = ret_vals\n else:\n e_t_uncond, e_t, pred_x0 = ret_vals\n\n with torch.no_grad():\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom: # retain_graph causes cuda oom issues for dino distance regularizer...\n with torch.enable_grad():\n 
pred_x0_0to1 = torch.clamp(_map_img(pred_x0), min=0.0, max=1.0)\n lp_dist = self.distance_criterion(pred_x0_0to1, self.dino_init_features.to(x.device).detach())\n lp_grad = torch.autograd.grad(lp_dist.mean(), x_noise, retain_graph=False)[0]\n elif self.lp_custom:\n with torch.enable_grad():\n pred_x0_0to1 = torch.clamp(_map_img(pred_x0), min=0.0, max=1.0)\n lp_dist = self.distance_criterion(pred_x0_0to1, self.init_images.to(x.device))\n lp_grad = torch.autograd.grad(lp_dist.mean(), x_noise, retain_graph=True)[0]\n \n if self.classifier_lambda != 0:\n with torch.enable_grad():\n if isinstance(self.lp_custom, str) and \"dino_\" in self.lp_custom:\n x_noise = x.detach().requires_grad_()\n ret_vals = self.get_output(x_noise, t, c, index, unconditional_conditioning,\n use_original_steps, quantize_denoised=quantize_denoised,\n return_decoded=True, return_pred_latent_x0=self.log_backprop_gradients)\n if self.log_backprop_gradients:\n e_t_uncond, e_t, pred_x0, pred_latent_x0 = ret_vals\n else:\n e_t_uncond, e_t, pred_x0 = ret_vals\n pred_logits = self.get_classifier_logits(pred_x0)\n if len(pred_logits.shape) == 2: # multi-class\n log_probs = torch.nn.functional.log_softmax(pred_logits, dim=-1)\n log_probs = log_probs[range(log_probs.size(0)), y.view(-1)]\n prob_best_class = torch.exp(log_probs).detach()\n else: # binary\n loss = self.binary_classification_criterion(pred_logits, y)\n loss *= -1 # minimize this\n log_probs = loss\n prob_best_class = pred_logits.sigmoid().detach()\n\n if self.log_backprop_gradients: pred_latent_x0.retain_grad()\n\n if self.dino_pipeline:\n grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=False)[0]\n else:\n grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=True)[0]\n # grad_classifier = torch.autograd.grad(log_probs.sum(), x_noise, retain_graph=True)[0]\n # grad_classifier2 = torch.autograd.grad(log_probs[0].sum(), x_noise, retain_graph=False)[0]\n\n if self.log_backprop_gradients:\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_t_sqrt = a_t.sqrt()\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n grad_pred_latent_x0 = pred_latent_x0.grad.data\n grad_unet_wrt_zt = (grad_classifier*a_t_sqrt/grad_pred_latent_x0 - 1)*(-1/sqrt_one_minus_at)\n\n cossim = torch.nn.CosineSimilarity()\n cossim_wpre = cossim(grad_classifier.view(2, -1), grad_pred_latent_x0.view(2, -1))\n \n print(torch.norm(grad_classifier, dim=(2,3)), torch.norm(grad_pred_latent_x0, dim=(2,3)), torch.norm(grad_unet_wrt_zt, dim=(2,3)))\n print(cossim_wpre)\n\n # assert e_t_uncond.requires_grad == True and e_t.requires_grad == True, \"e_t_uncond and e_t should require gradients\"\n\n # if self.guidance == \"projected\":\n implicit_classifier_score = (e_t - e_t_uncond) # .detach()\n # check gradient tracking on implicit_classifier_score\n assert implicit_classifier_score.requires_grad == False, \"implicit_classifier_score requires grad\"\n\n if self.lp_custom or self.classifier_lambda != 0:\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n\n if self.classifier_lambda != 0:\n classifier_score = -1 * grad_classifier * (1 - a_t).sqrt()\n assert classifier_score.requires_grad == False, 
\"classifier_score requires grad\"\n # project the gradient of the classifier on the implicit classifier\n\n\n projection_fn = cone_project if self.cone_projection_type == \"default\" else cone_project_chuncked\n projection_fn = cone_project_chuncked_zero if \"zero\" in self.cone_projection_type else projection_fn\n \n \n proj_out = projection_fn(implicit_classifier_score.view(x.shape[0], -1),\n classifier_score.view(x.shape[0], -1),\n self.deg_cone_projection,\n orig_shp=implicit_classifier_score.shape) \\\n if self.guidance == \"projected\" else classifier_score\n \n classifier_score = proj_out if self.cone_projection_type == \"default\" else proj_out[0].view_as(classifier_score)\n concensus_region = proj_out[1].unsqueeze(1) if self.cone_projection_type == \"binning\" else None\n #print(classifier_score.shape, concensus_region.shape)\n if self.enforce_same_norms:\n score_, norm_ = _renormalize_gradient(classifier_score,\n implicit_classifier_score) # e_t_uncond (AWAREE!!)\n classifier_score = self.classifier_lambda * score_\n\n else:\n classifier_score *= self.classifier_lambda\n\n score_out += classifier_score\n\n # distance gradients\n if self.lp_custom:\n\n lp_score = -1 * lp_grad * (1 - a_t).sqrt()\n\n if self.enforce_same_norms:\n score_, norm_ = _renormalize_gradient(lp_score,\n implicit_classifier_score)\n lp_score = self.dist_lambda * score_\n\n else:\n\n lp_score *= self.dist_lambda\n\n score_out -= lp_score\n\n e_t = e_t_uncond + unconditional_guidance_scale * score_out # (1 - a_t).sqrt() * grad_out\n\n \n if self.record_intermediate_results:\n # adding images to create a gif\n pred_x0_copy = pred_x0.clone().detach()\n img = torch.clamp(_map_img(pred_x0_copy), min=0.0, max=1.0)\n #img = torch.permute(img, (1, 2, 0, 3)).reshape((img.shape[1], img.shape[2], -1))\n\n self.images.append(img.detach().cpu())\n if self.classifier_lambda != 0 and self.cone_projection_type == \"binning\":\n self.concensus_regions.append(concensus_region.detach().cpu())\n \n if prob_best_class is not None:\n self.probs.append(prob_best_class.detach().cpu())\n\n return e_t\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n #pass\n # TODO: this is a hack to make it work on CPU\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)\n #print(\"DDIM timesteps: \", self.ddim_timesteps, \"with length: \", len(self.ddim_timesteps))\n #print all input parameters\n #print(\"DDIM parameters: \", self.ddim_timesteps, ddim_discretize, ddim_eta)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. 
- alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta, verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, ):\n\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 
'pred_x0': [img]}\n time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, y=None):\n b, *_, device = *x.shape, x.device\n\n e_t = self.conditional_score(x=x, c=c, t=t, index=index, use_original_steps=use_original_steps,\n quantize_denoised=quantize_denoised,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, y=y)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t ** 2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas).to(x0.device)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas.to(x0.device)\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, y=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, latent_t_0=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n if self.masked_guidance:\n print(\"### Getting the mask ###\")\n mask = self.get_mask()\n mask = F.interpolate(mask.to(torch.uint8), size=x_latent.shape[-2:])\n # mask = self.get_mask()\n # mask = F.interpolate(mask, size=x_latent.shape[-2:], mode='bilinear', align_corners=True)\n # mask = (mask - mask.min()) / (mask.max() - mask.min())\n # mask[mask < 0.5] = 0.\n # mask[mask >= 0.5] = 1.\n\n if self.verbose:\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n else:\n iterator = range(time_range)\n\n # if latent_t_0:\n # x_orig = x_latent\n # x_dec = self.stochastic_encode(x_latent.clone(),\n # torch.tensor([t_start] * (x_latent.shape[0])).to(x_latent.device))\n # else:\n x_dec = x_latent if not latent_t_0 else self.stochastic_encode(x_latent.clone(), torch.tensor([t_start] * (x_latent.shape[0])).to(x_latent.device))\n for i, step in enumerate(iterator):\n tic = time.time()\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n\n if self.masked_guidance and latent_t_0:\n #print(\"blending with original image\")\n img_orig = self.model.q_sample(x_latent.clone(), ts)\n x_dec = img_orig * (1. 
- mask) + (mask) * x_dec\n\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning, y=y)\n x_dec = x_dec.detach()\n for j in range(self.self_recurrence):\n print(\"self recurrence\")\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, unconditional_guidance_scale = 1)\n\n #workaround for long running time\n elapsed_time = time.time() - tic\n if elapsed_time > 6:\n print(f\"Iteration time {elapsed_time} exceeded limit 6 secs, terminating program...\")\n print(\"x_dec device: \", x_dec.device)\n sys.exit(1) # Terminate the program with exit code 1 (indicating an error) \n \n out = {}\n out['x_dec'] = x_dec\n out['video'] = torch.stack(self.images, dim=1) if len(self.images) != 0 else None\n out[\"mask\"] = self.mask.to(torch.float32) if self.mask is not None else None\n # print(f\"Video shape: {out['video'].shape}\")\n #out['prob'] = self.probs[-1].item() if len(self.probs) != 0 else None\n out['prob'] = self.probs[-1].detach().cpu().numpy() if len(self.probs) != 0 else None\n out['concensus_regions'] = torch.stack(self.concensus_regions, dim=1) if len(self.concensus_regions) != 0 else None\n #print(out['concensus_regions'].shape, (out[\"concensus_regions\"]>200).to(torch.float32).mean())\n self.images = []\n self.probs = []\n \n self.concensus_regions = []\n self.mask = None\n\n return out" }, { "identifier": "name_map", "path": "data/imagenet_classnames.py", "snippet": "" }, { "identifier": "DecisionDensenetModel", "path": "utils/DecisionDensenetModel.py", "snippet": "class DecisionDensenetModel(nn.Module):\n\n def __init__(self, num_classes=40, pretrained=False, query_label=-1):\n super().__init__()\n self.feat_extract = DenseNet121(pretrained=pretrained)\n self.classifier = nn.Linear(self.feat_extract.output_size, num_classes)\n self.query_label = query_label\n\n def forward(self, x, before_sigmoid=True):\n\n x = self.feat_extract(x)\n x = self.classifier(x)\n if not before_sigmoid:\n x = torch.sigmoid(x)\n return x[:, self.query_label]" }, { "identifier": "Normalizer", "path": "utils/preprocessor.py", "snippet": "class Normalizer(torch.nn.Module):\n '''\n normalizing module. Useful for computing the gradient\n to a x image (x in [0, 1]) when using a classifier with\n different normalization inputs (i.e. 
f((x - mu) / sigma))\n '''\n def __init__(self, classifier,\n mu=[0.485, 0.456, 0.406],\n sigma=[0.229, 0.224, 0.225]):\n super().__init__()\n self.classifier = classifier\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "CropAndNormalizer", "path": "utils/preprocessor.py", "snippet": "class CropAndNormalizer(torch.nn.Module):\n def __init__(self, classifier, crop_size: int=224, mu=[0.485, 0.456, 0.406], sigma=[0.229, 0.224, 0.225]) -> None:\n super().__init__()\n self.classifier = classifier\n self.crop_size = crop_size\n self.center_crop = torchvision.transforms.CenterCrop(crop_size)\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n # assumes x in [0, 1]!\n # x = F.center_crop(x, self.crop_size)\n x = self.center_crop(x)\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "ResizeAndNormalizer", "path": "utils/preprocessor.py", "snippet": "class ResizeAndNormalizer(torch.nn.Module):\n def __init__(self, classifier, resolution: tuple=(224, 224), mu=[0.485, 0.456, 0.406], sigma=[0.229, 0.224, 0.225]) -> None:\n super().__init__()\n self.classifier = classifier\n self.resolution = resolution\n self.resize = torchvision.transforms.Resize(resolution)\n self.register_buffer('mu', torch.tensor(mu).view(1, -1, 1, 1))\n self.register_buffer('sigma', torch.tensor(sigma).view(1, -1, 1, 1))\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.resize(x)\n x = (x - self.mu) / self.sigma\n return self.classifier(x)" }, { "identifier": "GenericPreprocessing", "path": "utils/preprocessor.py", "snippet": "class GenericPreprocessing(torch.nn.Module):\n def __init__(self, classifier, preprocessor) -> None:\n super().__init__()\n self.classifier = classifier\n self.preprocessor = preprocessor\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.preprocessor(x)\n return self.classifier(x)" }, { "identifier": "Crop", "path": "utils/preprocessor.py", "snippet": "class Crop(torch.nn.Module):\n def __init__(self, classifier, crop_size: int=224) -> None:\n super().__init__()\n self.classifier = classifier\n self.crop_size = crop_size\n self.center_crop = torchvision.transforms.CenterCrop(crop_size)\n\n def forward(self, x):\n # assumes x in [0, 1]!\n x = self.center_crop(x)\n return self.classifier(x)" }, { "identifier": "VisionLanguageWrapper", "path": "utils/vision_language_wrapper.py", "snippet": "class VisionLanguageWrapper(nn.Module):\n def __init__(self, model, tokenizer, prompts) -> None:\n super().__init__()\n self.model = model\n self.tokenizer = tokenizer\n self.prompts = prompts\n\n device = next(self.model.parameters()).device\n\n text = tokenizer(prompts)\n with torch.no_grad():\n self.text_features = model.encode_text(text.to(device))\n self.text_features = self.text_features / self.text_features.norm(dim=-1, keepdim=True)\n\n def forward(self, x):\n image_features = self.model.encode_image(x)\n image_features = image_features / image_features.norm(dim=-1, keepdim=True)\n logits = 100.0 * image_features @ self.text_features.T\n return logits" }, { "identifier": "MadryNet", "path": "utils/madry_net.py", "snippet": "def MadryNet(ckpt, device):\n norm = \"l2\"\n model = load_model(\n modelname=\"Engstrom2019Robustness\", norm=norm, device=device\n )\n 
state_dict = torch.load(ckpt, map_location=\"cpu\")\n model.model.load_state_dict(state_dict, strict=True)\n return model" }, { "identifier": "LinearClassifier", "path": "utils/dino_linear.py", "snippet": "class LinearClassifier(nn.Module):\n \"\"\"Linear layer to train on top of frozen features\"\"\"\n def __init__(self, dim, num_labels=1000):\n super(LinearClassifier, self).__init__()\n self.num_labels = num_labels\n self.linear = nn.Linear(dim, num_labels)\n self.linear.weight.data.normal_(mean=0.0, std=0.01)\n self.linear.bias.data.zero_()\n\n def forward(self, x):\n # flatten\n x = x.view(x.size(0), -1)\n\n # linear layer\n return self.linear(x)" }, { "identifier": "DINOLinear", "path": "utils/dino_linear.py", "snippet": "class DINOLinear(nn.Module):\n def __init__(self, dino, linear_classifier) -> None:\n super().__init__()\n self.dino = dino\n self.linear = linear_classifier\n \n def forward(self, x):\n x = self.dino(x)\n return self.linear(x)" } ]
import argparse
import os
import psutil
import yaml
import copy
import random
import matplotlib.pyplot as plt
import numpy as np
import pathlib
import torch
import hydra
import wandb
import torchvision
import json
import sys
import regex as re
import open_clip
from contextlib import nullcontext
from torch import autocast
from omegaconf import OmegaConf, open_dict
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from torchvision import transforms, datasets
from torchvision.utils import save_image
from sampling_helpers import disabled_train, get_model, _unmap_img, generate_samples
from sampling_helpers import load_model_hf
from ldm import *
from ldm.models.diffusion.cc_ddim import CCMDDIMSampler
from data.imagenet_classnames import name_map, openai_imagenet_classes
from utils.DecisionDensenetModel import DecisionDensenetModel
from utils.preprocessor import Normalizer, CropAndNormalizer, ResizeAndNormalizer, GenericPreprocessing, Crop
from utils.vision_language_wrapper import VisionLanguageWrapper
from utils.madry_net import MadryNet
from utils.dino_linear import LinearClassifier, DINOLinear
14,675
print(f"resuming from batch {last_data_idx}") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = torch.device("cpu") # there seems to be a CUDA/autograd instability in gradient computation print(f"using device: {device}") model = get_model(cfg_path=cfg.diffusion_model.cfg_path, ckpt_path = cfg.diffusion_model.ckpt_path).to(device).eval() classifier_model = get_classifier(cfg, device) classifier_model.to(device).eval() classifier_model.train = disabled_train ddim_steps = cfg.ddim_steps ddim_eta = cfg.ddim_eta scale = cfg.scale #for unconditional guidance strength = cfg.strength #for unconditional guidance sampler = CCMDDIMSampler(model, classifier_model, seg_model= None, classifier_wrapper="classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper, record_intermediate_results=cfg.record_intermediate_results, verbose=cfg.verbose, **cfg.sampler) sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=ddim_eta, verbose=False) assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]' t_enc = int(strength * len(sampler.ddim_timesteps)) assert len(sampler.ddim_timesteps) == ddim_steps, "ddim_steps should be equal to len(sampler.ddim_timesteps)" n_samples_per_class = cfg.n_samples_per_class batch_size = cfg.data.batch_size shuffle = cfg.get("shuffle", False) #save config to the output directory #check if the config file already exists else create a config file config_path = os.path.join(out_dir, "config.yaml") if os.path.exists(config_path): print("config file already exists! skipping ...") else: with open(os.path.join(out_dir, "config.yaml"), 'w') as f: print("saving config to ", os.path.join(out_dir, "config.yaml ...")) yaml.dump(config, f) os.chmod(os.path.join(out_dir, "config.yaml"), 0o555) #data_path = cfg.data_path dataset = get_dataset(cfg, last_data_idx=last_data_idx) print("dataset length: ", len(dataset)) data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=1) if "ImageNet" in cfg.data._target_: i2h = name_map elif "CelebAHQDataset" in cfg.data._target_: # query label 31 (smile): label=0 <-> no smile and label=1 <-> smile # query label 39 (age): label=0 <-> old and label=1 <-> young assert cfg.data.query_label in [31, 39] if 31 == cfg.data.query_label: i2h = ["no smile", "smile"] elif 39 == cfg.data.query_label: i2h = ["old", "young"] else: raise NotImplementedError elif "Flowers102" in cfg.data._target_: with open("data/flowers_idx_to_label.json", "r") as f: flowers_idx_to_classname = json.load(f) flowers_idx_to_classname = {int(k)-1: v for k, v in flowers_idx_to_classname.items()} i2h = flowers_idx_to_classname elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) i2h = {int(k): v for k, v in pets_idx_to_classname.items()} else: raise NotImplementedError if "ImageNet" in cfg.data._target_: with open('data/synset_closest_idx.yaml', 'r') as file: synset_closest_idx = yaml.safe_load(file) elif "Flowers102" in cfg.data._target_: with open("data/flowers_closest_indices.json") as file: closest_indices = json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_closest_indices.json") as file: closest_indices = json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} if not cfg.resume: torch.save({"last_data_idx": -1}, checkpoint_path) seed = cfg.seed if "seed" in cfg else 0 
set_seed(seed=seed) for i, batch in enumerate(data_loader): if "fixed_seed" in cfg: set_seed(seed=cfg.get("seed", 0)) if cfg.fixed_seed else None seed = seed if cfg.fixed_seed else -1 if "return_tgt_cls" in cfg.data and cfg.data.return_tgt_cls: image, label, tgt_classes, unique_data_idx = batch tgt_classes = tgt_classes.to(device) #squeeze() else: image, label, unique_data_idx = batch if "ImageNet" in cfg.data._target_: tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) elif "CelebAHQDataset" in cfg.data._target_: tgt_classes = (1 - label).type(torch.float32) elif "Flowers102" in cfg.data._target_ or "OxfordIIIPets" in cfg.data._target_: tgt_classes = torch.tensor([closest_indices[unique_data_idx[l].item()*cfg.data.num_shards + cfg.data.shard][0] for l in range(label.shape[0])]).to(device) else: raise NotImplementedError image = image.to(device) #squeeze() label = label.to(device) #.item() #squeeze() #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #tgt_classes = synset_closest_idx[label] #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #shuffle tgt_classes #random.shuffle(tgt_classes) #get classifcation prediction with torch.inference_mode(): #with precision_scope(): if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: logits = classifier_model(image) else:
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = Crop(classifier_model) else: classifier_model = getattr(torchvision.models, classifier_name)(pretrained=True) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = CropAndNormalizer(classifier_model) elif "CelebAHQDataset" in cfg.data._target_: assert cfg.data.query_label in [20, 31, 39], 'Query label MUST be 20 (Gender), 31 (Smile), or 39 (Age) for CelebAHQ' ql = 0 if cfg.data.query_label in [31, 39]: ql = 1 if cfg.data.query_label == 31 else 2 classifier_model = DecisionDensenetModel(3, pretrained=False, query_label=ql) classifier_model.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location='cpu')['model_state_dict']) if cfg.classifier_model.classifier_wrapper: classifier_model = Normalizer( classifier_model, [0.5] * 3, [0.5] * 3 ) elif "Flowers102" in cfg.data._target_: # fine-tuned Dino ViT B/8: https://arxiv.org/pdf/2104.14294.pdf dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').to(device).eval() dim = dino.embed_dim linear_classifier = LinearClassifier(dim*cfg.classifier_model.n_last_blocks, 102) linear_classifier.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location="cpu"), strict=True) linear_classifier = linear_classifier.eval().to(device) classifier_model = DINOLinear(dino, linear_classifier) transforms_list = [transforms.CenterCrop(224), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))] classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) elif "OxfordIIIPets" in cfg.data._target_: # zero-shot OpenClip: https://arxiv.org/pdf/2212.07143.pdf model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') model = model.to(device).eval() tokenizer = open_clip.get_tokenizer('ViT-B-32') # prompts following https://github.com/openai/CLIP/blob/main/data/prompts.md with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) prompts = [f"a photo of a {label}, a type of pet." 
for label in pets_idx_to_classname.values()] classifier_model = VisionLanguageWrapper(model, tokenizer, prompts) # try running optimization on 224x224 pixel image # transforms_list = [preprocess.transforms[0], preprocess.transforms[1], preprocess.transforms[4]] if cfg.classifier_model.classifier_wrapper: transforms_list = [preprocess.transforms[1], preprocess.transforms[4]] # CenterCrop(224, 224), Normalize classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) else: raise NotImplementedError return classifier_model def get_dataset(cfg, last_data_idx: int = 0): if "ImageNet" in cfg.data._target_: out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), transforms.ToTensor() ] transform = transforms.Compose(transform_list) dataset = instantiate(cfg.data, start_sample=cfg.data.start_sample, end_sample=cfg.data.end_sample, transform=transform, restart_idx=last_data_idx) elif "CelebAHQDataset" in cfg.data._target_: dataset = instantiate( cfg.data, image_size=256, data_dir=cfg.data.data_dir, random_crop=False, random_flip=False, partition='test', query_label=cfg.data.query_label, normalize=False, shard=cfg.data.shard, num_shards=cfg.data.num_shards, restart_idx=last_data_idx ) elif "Flowers102" in cfg.data._target_: transform = transforms.Compose([ transforms.Resize((256, 256)), transforms.ToTensor(), ]) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) elif "OxfordIIIPets" in cfg.data._target_: # try running on 224x224 img def _convert_to_rgb(image): return image.convert('RGB') out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), # transforms.CenterCrop(out_size), _convert_to_rgb, transforms.ToTensor(), ] transform = transforms.Compose(transform_list) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) else: raise NotImplementedError return dataset @hydra.main(version_base=None, config_path="../configs/ldce", config_name="v1") def main(cfg : DictConfig) -> None: if "verbose" not in cfg: with open_dict(cfg): cfg.verbose = True if "record_intermediate_results" not in cfg: with open_dict(cfg): cfg.record_intermediate_results = True if "verbose" in cfg and not cfg.verbose: blockPrint() os.makedirs(cfg.output_dir, exist_ok=True) os.chmod(cfg.output_dir, 0o777) if "ImageNet" in cfg.data._target_: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.start_sample}_{cfg.data.end_sample}") else: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.shard}_{cfg.data.num_shards}") os.makedirs(out_dir, exist_ok=True) os.chmod(out_dir, 0o777) checkpoint_path = os.path.join(out_dir, "last_saved_id.pth") config = {} if "ImageNet" in cfg.data._target_: run_id = f"{cfg.data.start_sample}_{cfg.data.end_sample}" else: run_id = f"{cfg.data.shard}_{cfg.data.num_shards}" if cfg.resume: print("run ID to resume: ", run_id) else: print("starting new run", run_id) config.update(OmegaConf.to_container(cfg, resolve=True)) print("current run id: ", run_id) last_data_idx = 0 if cfg.resume: # or os.path.isfile(checkpoint_path): resume only if asked to, allow restarts print(f"resuming from {checkpoint_path}") #check if checkpoint exists if not os.path.exists(checkpoint_path): print("checkpoint does not exist! 
starting from 0 ...") else: checkpoint = torch.load(checkpoint_path)# torch.load(restored_file.name) last_data_idx = checkpoint["last_data_idx"] + 1 if "last_data_idx" in checkpoint else 0 print(f"resuming from batch {last_data_idx}") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = torch.device("cpu") # there seems to be a CUDA/autograd instability in gradient computation print(f"using device: {device}") model = get_model(cfg_path=cfg.diffusion_model.cfg_path, ckpt_path = cfg.diffusion_model.ckpt_path).to(device).eval() classifier_model = get_classifier(cfg, device) classifier_model.to(device).eval() classifier_model.train = disabled_train ddim_steps = cfg.ddim_steps ddim_eta = cfg.ddim_eta scale = cfg.scale #for unconditional guidance strength = cfg.strength #for unconditional guidance sampler = CCMDDIMSampler(model, classifier_model, seg_model= None, classifier_wrapper="classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper, record_intermediate_results=cfg.record_intermediate_results, verbose=cfg.verbose, **cfg.sampler) sampler.make_schedule(ddim_num_steps=ddim_steps, ddim_eta=ddim_eta, verbose=False) assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]' t_enc = int(strength * len(sampler.ddim_timesteps)) assert len(sampler.ddim_timesteps) == ddim_steps, "ddim_steps should be equal to len(sampler.ddim_timesteps)" n_samples_per_class = cfg.n_samples_per_class batch_size = cfg.data.batch_size shuffle = cfg.get("shuffle", False) #save config to the output directory #check if the config file already exists else create a config file config_path = os.path.join(out_dir, "config.yaml") if os.path.exists(config_path): print("config file already exists! skipping ...") else: with open(os.path.join(out_dir, "config.yaml"), 'w') as f: print("saving config to ", os.path.join(out_dir, "config.yaml ...")) yaml.dump(config, f) os.chmod(os.path.join(out_dir, "config.yaml"), 0o555) #data_path = cfg.data_path dataset = get_dataset(cfg, last_data_idx=last_data_idx) print("dataset length: ", len(dataset)) data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=1) if "ImageNet" in cfg.data._target_: i2h = name_map elif "CelebAHQDataset" in cfg.data._target_: # query label 31 (smile): label=0 <-> no smile and label=1 <-> smile # query label 39 (age): label=0 <-> old and label=1 <-> young assert cfg.data.query_label in [31, 39] if 31 == cfg.data.query_label: i2h = ["no smile", "smile"] elif 39 == cfg.data.query_label: i2h = ["old", "young"] else: raise NotImplementedError elif "Flowers102" in cfg.data._target_: with open("data/flowers_idx_to_label.json", "r") as f: flowers_idx_to_classname = json.load(f) flowers_idx_to_classname = {int(k)-1: v for k, v in flowers_idx_to_classname.items()} i2h = flowers_idx_to_classname elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) i2h = {int(k): v for k, v in pets_idx_to_classname.items()} else: raise NotImplementedError if "ImageNet" in cfg.data._target_: with open('data/synset_closest_idx.yaml', 'r') as file: synset_closest_idx = yaml.safe_load(file) elif "Flowers102" in cfg.data._target_: with open("data/flowers_closest_indices.json") as file: closest_indices = json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} elif "OxfordIIIPets" in cfg.data._target_: with open("data/pets_closest_indices.json") as file: closest_indices = 
json.load(file) closest_indices = {int(k):v for k,v in closest_indices.items()} if not cfg.resume: torch.save({"last_data_idx": -1}, checkpoint_path) seed = cfg.seed if "seed" in cfg else 0 set_seed(seed=seed) for i, batch in enumerate(data_loader): if "fixed_seed" in cfg: set_seed(seed=cfg.get("seed", 0)) if cfg.fixed_seed else None seed = seed if cfg.fixed_seed else -1 if "return_tgt_cls" in cfg.data and cfg.data.return_tgt_cls: image, label, tgt_classes, unique_data_idx = batch tgt_classes = tgt_classes.to(device) #squeeze() else: image, label, unique_data_idx = batch if "ImageNet" in cfg.data._target_: tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) elif "CelebAHQDataset" in cfg.data._target_: tgt_classes = (1 - label).type(torch.float32) elif "Flowers102" in cfg.data._target_ or "OxfordIIIPets" in cfg.data._target_: tgt_classes = torch.tensor([closest_indices[unique_data_idx[l].item()*cfg.data.num_shards + cfg.data.shard][0] for l in range(label.shape[0])]).to(device) else: raise NotImplementedError image = image.to(device) #squeeze() label = label.to(device) #.item() #squeeze() #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #tgt_classes = synset_closest_idx[label] #tgt_classes = torch.tensor([random.choice(synset_closest_idx[l.item()]) for l in label]).to(device) #shuffle tgt_classes #random.shuffle(tgt_classes) #get classifcation prediction with torch.inference_mode(): #with precision_scope(): if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: logits = classifier_model(image) else:
logits = sampler.get_classifier_logits(_unmap_img(image)) #converting to -1, 1
2
2023-10-10 09:40:10+00:00
24k
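For reference, a minimal sketch of how rows with the schema above (repo_name, file_path, context, import_statement, token_num, cropped_code, all_code, next_line, gold_snippet_index, created_at, level) could be loaded and inspected. It assumes the records have been exported as JSON Lines to a file named rows.jsonl; the file name, the export format, the prompt construction, and the reading of gold_snippet_index as an index into the context list are illustrative assumptions, not statements about how the dataset was built.

import json

def iter_rows(path="rows.jsonl"):
    # Assumes one JSON object per line with the fields listed in the schema above.
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            if line.strip():
                yield json.loads(line)

def exact_match(prediction, row):
    # The completion target of a row is its next_line field.
    return prediction.strip() == row["next_line"].strip()

for row in iter_rows():
    # gold_snippet_index is treated here as an index into the context list
    # (an assumption based on the field name and its integer range).
    gold = row["context"][row["gold_snippet_index"]]
    # One plausible prompt: the file's imports followed by the cropped code.
    prompt = row["import_statement"] + "\n" + row["cropped_code"]
    print(row["repo_name"], row["file_path"], row["level"], gold["identifier"])

A model's completion for a row would then be scored against row["next_line"], for example with the exact_match helper above.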
spla-tam/SplaTAM
scripts/post_splatam_opt.py
[ { "identifier": "AzureKinectDataset", "path": "datasets/gradslam_datasets/azure.py", "snippet": "class AzureKinectDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = None\n\n # # check if a file named 'poses_global_dvo.txt' exists in the basedir / sequence folder\n # if os.path.isfile(os.path.join(basedir, sequence, \"poses_global_dvo.txt\")):\n # self.pose_path = os.path.join(basedir, sequence, \"poses_global_dvo.txt\")\n\n if \"odomfile\" in kwargs.keys():\n self.pose_path = os.path.join(self.input_folder, kwargs[\"odomfile\"])\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/color/*.jpg\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n if self.pose_path is None:\n print(\"WARNING: Dataset does not contain poses. Returning identity transform.\")\n return [torch.eye(4).float() for _ in range(self.num_imgs)]\n else:\n # Determine whether the posefile ends in \".log\"\n # a .log file has the following format for each frame\n # frame_idx frame_idx+1\n # row 1 of 4x4 transform\n # row 2 of 4x4 transform\n # row 3 of 4x4 transform\n # row 4 of 4x4 transform\n # [repeat for all frames]\n #\n # on the other hand, the \"poses_o3d.txt\" or \"poses_dvo.txt\" files have the format\n # 16 entries of 4x4 transform\n # [repeat for all frames]\n if self.pose_path.endswith(\".log\"):\n # print(\"Loading poses from .log format\")\n poses = []\n lines = None\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n if len(lines) % 5 != 0:\n raise ValueError(\n \"Incorrect file format for .log odom file \" \"Number of non-empty lines must be a multiple of 5\"\n )\n num_lines = len(lines) // 5\n for i in range(0, num_lines):\n _curpose = []\n _curpose.append(list(map(float, lines[5 * i + 1].split())))\n _curpose.append(list(map(float, lines[5 * i + 2].split())))\n _curpose.append(list(map(float, lines[5 * i + 3].split())))\n _curpose.append(list(map(float, lines[5 * i + 4].split())))\n _curpose = np.array(_curpose).reshape(4, 4)\n poses.append(torch.from_numpy(_curpose))\n else:\n poses = []\n lines = None\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n if len(line.split()) == 0:\n continue\n c2w = np.array(list(map(float, line.split()))).reshape(4, 4)\n poses.append(torch.from_numpy(c2w))\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding # .permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "load_dataset_config", "path": "datasets/gradslam_datasets/dataconfig.py", "snippet": "def 
load_dataset_config(path, default_path=None):\n \"\"\"\n Loads config file.\n\n Args:\n path (str): path to config file.\n default_path (str, optional): whether to use default path. Defaults to None.\n\n Returns:\n cfg (dict): config dict.\n\n \"\"\"\n # load configuration from file itself\n with open(path, \"r\") as f:\n cfg_special = yaml.full_load(f)\n\n # check if we should inherit from a config\n inherit_from = cfg_special.get(\"inherit_from\")\n\n # if yes, load this config first as default\n # if no, use the default_path\n if inherit_from is not None:\n cfg = load_dataset_config(inherit_from, default_path)\n elif default_path is not None:\n with open(default_path, \"r\") as f:\n cfg = yaml.full_load(f)\n else:\n cfg = dict()\n\n # include main configuration\n update_recursive(cfg, cfg_special)\n\n return cfg" }, { "identifier": "ICLDataset", "path": "datasets/gradslam_datasets/icl.py", "snippet": "class ICLDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict: Dict,\n basedir: Union[Path, str],\n sequence: Union[Path, str],\n stride: Optional[int] = 1,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[Union[Path, str]] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n embedding_file_extension: Optional[str] = \"pt\",\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n # Attempt to find pose file (*.gt.sim)\n self.pose_path = glob.glob(os.path.join(self.input_folder, \"*.gt.sim\"))\n if self.pose_path == 0:\n raise ValueError(\"Need pose file ending in extension `*.gt.sim`\")\n self.pose_path = self.pose_path[0]\n self.embedding_file_extension = embedding_file_extension\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/rgb/*.png\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(\n glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.{self.embedding_file_extension}\")\n )\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n\n lines = []\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n\n _posearr = []\n for line in lines:\n line = line.strip().split()\n if len(line) == 0:\n continue\n _npvec = np.asarray([float(line[0]), float(line[1]), float(line[2]), float(line[3])])\n _posearr.append(_npvec)\n _posearr = np.stack(_posearr)\n\n for pose_line_idx in range(0, _posearr.shape[0], 3):\n _curpose = np.zeros((4, 4))\n _curpose[3, 3] = 3\n _curpose[0] = _posearr[pose_line_idx]\n _curpose[1] = _posearr[pose_line_idx + 1]\n _curpose[2] = _posearr[pose_line_idx + 2]\n poses.append(torch.from_numpy(_curpose).float())\n\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "ReplicaDataset", "path": "datasets/gradslam_datasets/replica.py", "snippet": "class ReplicaDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] 
= 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = os.path.join(self.input_folder, \"traj.txt\")\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/results/frame*.jpg\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/results/depth*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n for i in range(self.num_imgs):\n line = lines[i]\n c2w = np.array(list(map(float, line.split()))).reshape(4, 4)\n # c2w[:3, 1] *= -1\n # c2w[:3, 2] *= -1\n c2w = torch.from_numpy(c2w).float()\n poses.append(c2w)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "ScannetDataset", "path": "datasets/gradslam_datasets/scannet.py", "snippet": "class ScannetDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 968,\n desired_width: Optional[int] = 1296,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = None\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/color/*.jpg\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n posefiles = natsorted(glob.glob(f\"{self.input_folder}/pose/*.txt\"))\n for posefile in posefiles:\n _pose = torch.from_numpy(np.loadtxt(posefile))\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n print(embedding_file_path)\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "Ai2thorDataset", "path": "datasets/gradslam_datasets/ai2thor.py", "snippet": "class Ai2thorDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 968,\n desired_width: Optional[int] = 
1296,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/color/*.png\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n if self.embedding_dir == \"embed_semseg\":\n # embed_semseg is stored as uint16 pngs\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.png\"))\n else:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n posefiles = natsorted(glob.glob(f\"{self.input_folder}/pose/*.txt\"))\n for posefile in posefiles:\n _pose = torch.from_numpy(np.loadtxt(posefile))\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n if self.embedding_dir == \"embed_semseg\":\n embedding = imageio.imread(embedding_file_path) # (H, W)\n embedding = cv2.resize(\n embedding, (self.desired_width, self.desired_height), interpolation=cv2.INTER_NEAREST\n )\n embedding = torch.from_numpy(embedding).long() # (H, W)\n embedding = F.one_hot(embedding, num_classes=self.embedding_dim) # (H, W, C)\n embedding = embedding.half() # (H, W, C)\n embedding = embedding.permute(2, 0, 1) # (C, H, W)\n embedding = embedding.unsqueeze(0) # (1, C, H, W)\n else:\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "RealsenseDataset", "path": "datasets/gradslam_datasets/realsense.py", "snippet": "class RealsenseDataset(GradSLAMDataset):\n \"\"\"\n Dataset class to process depth images captured by realsense camera on the tabletop manipulator\n \"\"\"\n\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n # only poses/images/depth corresponding to the realsense_camera_order are read/used\n self.pose_path = os.path.join(self.input_folder, \"poses\")\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"rgb\", \"*.jpg\")))\n depth_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"depth\", \"*.png\")))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n posefiles = natsorted(glob.glob(os.path.join(self.pose_path, 
\"*.npy\")))\n poses = []\n P = torch.tensor([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]).float()\n for posefile in posefiles:\n c2w = torch.from_numpy(np.load(posefile)).float()\n _R = c2w[:3, :3]\n _t = c2w[:3, 3]\n _pose = P @ c2w @ P.T\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "Record3DDataset", "path": "datasets/gradslam_datasets/record3d.py", "snippet": "class Record3DDataset(GradSLAMDataset):\n \"\"\"\n Dataset class to read in saved files from the structure created by our\n `save_record3d_stream.py` script\n \"\"\"\n\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = os.path.join(self.input_folder, \"poses\")\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"rgb\", \"*.png\")))\n depth_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"depth\", \"*.png\")))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n posefiles = natsorted(glob.glob(os.path.join(self.pose_path, \"*.npy\")))\n poses = []\n P = torch.tensor([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]).float()\n for posefile in posefiles:\n c2w = torch.from_numpy(np.load(posefile)).float()\n _R = c2w[:3, :3]\n _t = c2w[:3, 3]\n _pose = P @ c2w @ P.T\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "TUMDataset", "path": "datasets/gradslam_datasets/tum.py", "snippet": "class TUMDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = None\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def parse_list(self, filepath, skiprows=0):\n \"\"\" read list data \"\"\"\n data = np.loadtxt(filepath, delimiter=' ',\n dtype=np.unicode_, skiprows=skiprows)\n return data\n\n def associate_frames(self, tstamp_image, tstamp_depth, tstamp_pose, max_dt=0.08):\n \"\"\" pair 
images, depths, and poses \"\"\"\n associations = []\n for i, t in enumerate(tstamp_image):\n if tstamp_pose is None:\n j = np.argmin(np.abs(tstamp_depth - t))\n if (np.abs(tstamp_depth[j] - t) < max_dt):\n associations.append((i, j))\n\n else:\n j = np.argmin(np.abs(tstamp_depth - t))\n k = np.argmin(np.abs(tstamp_pose - t))\n\n if (np.abs(tstamp_depth[j] - t) < max_dt) and \\\n (np.abs(tstamp_pose[k] - t) < max_dt):\n associations.append((i, j, k))\n\n return associations\n\n def pose_matrix_from_quaternion(self, pvec):\n \"\"\" convert 4x4 pose matrix to (t, q) \"\"\"\n from scipy.spatial.transform import Rotation\n\n pose = np.eye(4)\n pose[:3, :3] = Rotation.from_quat(pvec[3:]).as_matrix()\n pose[:3, 3] = pvec[:3]\n return pose\n\n def get_filepaths(self):\n\n frame_rate = 32\n \"\"\" read video data in tum-rgbd format \"\"\"\n if os.path.isfile(os.path.join(self.input_folder, 'groundtruth.txt')):\n pose_list = os.path.join(self.input_folder, 'groundtruth.txt')\n elif os.path.isfile(os.path.join(self.input_folder, 'pose.txt')):\n pose_list = os.path.join(self.input_folder, 'pose.txt')\n\n image_list = os.path.join(self.input_folder, 'rgb.txt')\n depth_list = os.path.join(self.input_folder, 'depth.txt')\n\n image_data = self.parse_list(image_list)\n depth_data = self.parse_list(depth_list)\n pose_data = self.parse_list(pose_list, skiprows=1)\n pose_vecs = pose_data[:, 1:].astype(np.float64)\n\n tstamp_image = image_data[:, 0].astype(np.float64)\n tstamp_depth = depth_data[:, 0].astype(np.float64)\n tstamp_pose = pose_data[:, 0].astype(np.float64)\n associations = self.associate_frames(\n tstamp_image, tstamp_depth, tstamp_pose)\n\n indicies = [0]\n for i in range(1, len(associations)):\n t0 = tstamp_image[associations[indicies[-1]][0]]\n t1 = tstamp_image[associations[i][0]]\n if t1 - t0 > 1.0 / frame_rate:\n indicies += [i]\n\n color_paths, depth_paths = [], []\n for ix in indicies:\n (i, j, k) = associations[ix]\n color_paths += [os.path.join(self.input_folder, image_data[i, 1])]\n depth_paths += [os.path.join(self.input_folder, depth_data[j, 1])]\n\n embedding_paths = None\n\n return color_paths, depth_paths, embedding_paths\n \n def load_poses(self):\n \n frame_rate = 32\n \"\"\" read video data in tum-rgbd format \"\"\"\n if os.path.isfile(os.path.join(self.input_folder, 'groundtruth.txt')):\n pose_list = os.path.join(self.input_folder, 'groundtruth.txt')\n elif os.path.isfile(os.path.join(self.input_folder, 'pose.txt')):\n pose_list = os.path.join(self.input_folder, 'pose.txt')\n\n image_list = os.path.join(self.input_folder, 'rgb.txt')\n depth_list = os.path.join(self.input_folder, 'depth.txt')\n\n image_data = self.parse_list(image_list)\n depth_data = self.parse_list(depth_list)\n pose_data = self.parse_list(pose_list, skiprows=1)\n pose_vecs = pose_data[:, 1:].astype(np.float64)\n\n tstamp_image = image_data[:, 0].astype(np.float64)\n tstamp_depth = depth_data[:, 0].astype(np.float64)\n tstamp_pose = pose_data[:, 0].astype(np.float64)\n associations = self.associate_frames(\n tstamp_image, tstamp_depth, tstamp_pose)\n\n indicies = [0]\n for i in range(1, len(associations)):\n t0 = tstamp_image[associations[indicies[-1]][0]]\n t1 = tstamp_image[associations[i][0]]\n if t1 - t0 > 1.0 / frame_rate:\n indicies += [i]\n\n color_paths, poses, depth_paths, intrinsics = [], [], [], []\n inv_pose = None\n for ix in indicies:\n (i, j, k) = associations[ix]\n color_paths += [os.path.join(self.input_folder, image_data[i, 1])]\n depth_paths += [os.path.join(self.input_folder, 
depth_data[j, 1])]\n c2w = self.pose_matrix_from_quaternion(pose_vecs[k])\n c2w = torch.from_numpy(c2w).float()\n poses += [c2w]\n\n return poses\n \n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1)" }, { "identifier": "ScannetPPDataset", "path": "datasets/gradslam_datasets/scannetpp.py", "snippet": "class ScannetPPDataset(GradSLAMDataset):\n def __init__(\n self,\n basedir,\n sequence,\n ignore_bad: Optional[bool] = False,\n use_train_split: Optional[bool] = True,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 1168,\n desired_width: Optional[int] = 1752,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n config_dict = {}\n config_dict[\"dataset_name\"] = \"scannetpp\"\n self.pose_path = None\n self.ignore_bad = ignore_bad\n self.use_train_split = use_train_split\n\n # Load Train & Test Split\n self.train_test_split = json.load(open(f\"{self.input_folder}/dslr/train_test_lists.json\", \"r\"))\n if self.use_train_split:\n self.image_names = self.train_test_split[\"train\"]\n else:\n self.image_names = self.train_test_split[\"test\"]\n self.train_image_names = self.train_test_split[\"train\"]\n \n # Load NeRFStudio format camera & poses data\n self.cams_metadata = self.load_cams_metadata()\n if self.use_train_split:\n self.frames_metadata = self.cams_metadata[\"frames\"]\n self.filepath_index_mapping = create_filepath_index_mapping(self.frames_metadata)\n else:\n self.frames_metadata = self.cams_metadata[\"test_frames\"]\n self.train_frames_metadata = self.cams_metadata[\"frames\"]\n self.filepath_index_mapping = create_filepath_index_mapping(self.frames_metadata)\n self.train_filepath_index_mapping = create_filepath_index_mapping(self.train_frames_metadata) \n\n # Init Intrinsics\n config_dict[\"camera_params\"] = {}\n config_dict[\"camera_params\"][\"png_depth_scale\"] = 1000.0 # Depth is in mm\n config_dict[\"camera_params\"][\"image_height\"] = self.cams_metadata[\"h\"]\n config_dict[\"camera_params\"][\"image_width\"] = self.cams_metadata[\"w\"]\n config_dict[\"camera_params\"][\"fx\"] = self.cams_metadata[\"fl_x\"]\n config_dict[\"camera_params\"][\"fy\"] = self.cams_metadata[\"fl_y\"]\n config_dict[\"camera_params\"][\"cx\"] = self.cams_metadata[\"cx\"]\n config_dict[\"camera_params\"][\"cy\"] = self.cams_metadata[\"cy\"]\n\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n ) \n\n def load_cams_metadata(self):\n cams_metadata_path = f\"{self.input_folder}/dslr/nerfstudio/transforms_undistorted.json\"\n cams_metadata = json.load(open(cams_metadata_path, \"r\"))\n return cams_metadata\n \n def get_filepaths(self):\n base_path = f\"{self.input_folder}/dslr\"\n color_paths = []\n depth_paths = []\n self.tmp_poses = []\n P = torch.tensor(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]\n ]\n ).float()\n if not self.use_train_split:\n self.first_train_image_name = self.train_image_names[0]\n self.first_train_image_index = self.train_filepath_index_mapping.get(self.first_train_image_name)\n self.first_train_frame_metadata = 
self.train_frames_metadata[self.first_train_image_index]\n # Get path of undistorted image and depth\n color_path = f\"{base_path}/undistorted_images/{self.first_train_image_name}\"\n depth_path = f\"{base_path}/undistorted_depths/{self.first_train_image_name.replace('.JPG', '.png')}\"\n color_paths.append(color_path)\n depth_paths.append(depth_path)\n # Get pose of first train frame in GradSLAM format\n c2w = torch.from_numpy(np.array(self.first_train_frame_metadata[\"transform_matrix\"])).float()\n _pose = P @ c2w @ P.T\n self.tmp_poses.append(_pose)\n for image_name in self.image_names:\n # Search for image name in frames_metadata\n frame_metadata = self.frames_metadata[self.filepath_index_mapping.get(image_name)]\n # Check if frame is blurry and if it needs to be ignored\n if self.ignore_bad and frame_metadata['is_bad']:\n continue\n # Get path of undistorted image and depth\n color_path = f\"{base_path}/undistorted_images/{image_name}\"\n depth_path = f\"{base_path}/undistorted_depths/{image_name.replace('.JPG', '.png')}\"\n color_paths.append(color_path)\n depth_paths.append(depth_path)\n # Get pose of undistorted image in GradSLAM format\n c2w = torch.from_numpy(np.array(frame_metadata[\"transform_matrix\"])).float()\n _pose = P @ c2w @ P.T\n self.tmp_poses.append(_pose)\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{base_path}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n return self.tmp_poses\n\n def read_embedding_from_file(self, embedding_file_path):\n print(embedding_file_path)\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "NeRFCaptureDataset", "path": "datasets/gradslam_datasets/nerfcapture.py", "snippet": "class NeRFCaptureDataset(GradSLAMDataset):\n def __init__(\n self,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 1440,\n desired_width: Optional[int] = 1920,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n config_dict = {}\n config_dict[\"dataset_name\"] = \"nerfcapture\"\n self.pose_path = None\n \n # Load NeRFStudio format camera & poses data\n self.cams_metadata = self.load_cams_metadata()\n self.frames_metadata = self.cams_metadata[\"frames\"]\n self.filepath_index_mapping = create_filepath_index_mapping(self.frames_metadata)\n\n # Load RGB & Depth filepaths\n self.image_names = natsorted(os.listdir(f\"{self.input_folder}/rgb\"))\n self.image_names = [f'rgb/{image_name}' for image_name in self.image_names]\n\n # Init Intrinsics\n config_dict[\"camera_params\"] = {}\n config_dict[\"camera_params\"][\"png_depth_scale\"] = 6553.5 # Depth is in mm\n config_dict[\"camera_params\"][\"image_height\"] = self.cams_metadata[\"h\"]\n config_dict[\"camera_params\"][\"image_width\"] = self.cams_metadata[\"w\"]\n config_dict[\"camera_params\"][\"fx\"] = self.cams_metadata[\"fl_x\"]\n config_dict[\"camera_params\"][\"fy\"] = self.cams_metadata[\"fl_y\"]\n config_dict[\"camera_params\"][\"cx\"] = self.cams_metadata[\"cx\"]\n config_dict[\"camera_params\"][\"cy\"] = self.cams_metadata[\"cy\"]\n\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n 
desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n ) \n\n def load_cams_metadata(self):\n cams_metadata_path = f\"{self.input_folder}/transforms.json\"\n cams_metadata = json.load(open(cams_metadata_path, \"r\"))\n return cams_metadata\n \n def get_filepaths(self):\n base_path = f\"{self.input_folder}\"\n color_paths = []\n depth_paths = []\n self.tmp_poses = []\n P = torch.tensor(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]\n ]\n ).float()\n for image_name in self.image_names:\n # Search for image name in frames_metadata\n frame_metadata = self.frames_metadata[self.filepath_index_mapping.get(image_name)]\n # Get path of image and depth\n color_path = f\"{base_path}/{image_name}\"\n depth_path = f\"{base_path}/{image_name.replace('rgb', 'depth')}\"\n color_paths.append(color_path)\n depth_paths.append(depth_path)\n # Get pose of image in GradSLAM format\n c2w = torch.from_numpy(np.array(frame_metadata[\"transform_matrix\"])).float()\n _pose = P @ c2w @ P.T\n self.tmp_poses.append(_pose)\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{base_path}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n return self.tmp_poses\n\n def read_embedding_from_file(self, embedding_file_path):\n print(embedding_file_path)\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "seed_everything", "path": "utils/common_utils.py", "snippet": "def seed_everything(seed=42):\n \"\"\"\n Set the `seed` value for torch and numpy seeds. Also turns on\n deterministic execution for cudnn.\n \n Parameters:\n - seed: A hashable seed value\n \"\"\"\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n print(f\"Seed set to: {seed} (type: {type(seed)})\")" }, { "identifier": "save_seq_params", "path": "utils/common_utils.py", "snippet": "def save_seq_params(all_params, output_dir):\n params_to_save = {}\n for frame_idx, params in enumerate(all_params):\n params_to_save[f\"frame_{frame_idx}\"] = params2cpu(params)\n # Save the Parameters containing the Sequence of Gaussians\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params.npz\")\n np.savez(save_path, **params_to_save)" }, { "identifier": "save_params", "path": "utils/common_utils.py", "snippet": "def save_params(output_params, output_dir):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params.npz\")\n np.savez(save_path, **to_save)" }, { "identifier": "save_params_ckpt", "path": "utils/common_utils.py", "snippet": "def save_params_ckpt(output_params, output_dir, time_idx):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params\"+str(time_idx)+\".npz\")\n np.savez(save_path, **to_save)" }, { "identifier": 
"save_seq_params_ckpt", "path": "utils/common_utils.py", "snippet": "def save_seq_params_ckpt(all_params, output_dir,time_idx):\n params_to_save = {}\n for frame_idx, params in enumerate(all_params):\n params_to_save[f\"frame_{frame_idx}\"] = params2cpu(params)\n # Save the Parameters containing the Sequence of Gaussians\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params\"+str(time_idx)+\".npz\")\n np.savez(save_path, **params_to_save)" }, { "identifier": "setup_camera", "path": "utils/recon_helpers.py", "snippet": "def setup_camera(w, h, k, w2c, near=0.01, far=100):\n fx, fy, cx, cy = k[0][0], k[1][1], k[0][2], k[1][2]\n w2c = torch.tensor(w2c).cuda().float()\n cam_center = torch.inverse(w2c)[:3, 3]\n w2c = w2c.unsqueeze(0).transpose(1, 2)\n opengl_proj = torch.tensor([[2 * fx / w, 0.0, -(w - 2 * cx) / w, 0.0],\n [0.0, 2 * fy / h, -(h - 2 * cy) / h, 0.0],\n [0.0, 0.0, far / (far - near), -(far * near) / (far - near)],\n [0.0, 0.0, 1.0, 0.0]]).cuda().float().unsqueeze(0).transpose(1, 2)\n full_proj = w2c.bmm(opengl_proj)\n cam = Camera(\n image_height=h,\n image_width=w,\n tanfovx=w / (2 * fx),\n tanfovy=h / (2 * fy),\n bg=torch.tensor([0, 0, 0], dtype=torch.float32, device=\"cuda\"),\n scale_modifier=1.0,\n viewmatrix=w2c,\n projmatrix=full_proj,\n sh_degree=0,\n campos=cam_center,\n prefiltered=False\n )\n return cam" }, { "identifier": "params2rendervar", "path": "utils/gs_helpers.py", "snippet": "def params2rendervar(params):\n rendervar = {\n 'means3D': params['means3D'],\n 'colors_precomp': params['rgb_colors'],\n 'rotations': F.normalize(params['unnorm_rotations']),\n 'opacities': torch.sigmoid(params['logit_opacities']),\n 'scales': torch.exp(torch.tile(params['log_scales'], (1, 3))),\n 'means2D': torch.zeros_like(params['means3D'], requires_grad=True, device=\"cuda\") + 0\n }\n return rendervar" }, { "identifier": "params2depthplussilhouette", "path": "utils/gs_helpers.py", "snippet": "def params2depthplussilhouette(params, w2c):\n rendervar = {\n 'means3D': params['means3D'],\n 'colors_precomp': get_depth_and_silhouette(params['means3D'], w2c),\n 'rotations': F.normalize(params['unnorm_rotations']),\n 'opacities': torch.sigmoid(params['logit_opacities']),\n 'scales': torch.exp(torch.tile(params['log_scales'], (1, 3))),\n 'means2D': torch.zeros_like(params['means3D'], requires_grad=True, device=\"cuda\") + 0\n }\n return rendervar" }, { "identifier": "transformed_params2depthplussilhouette", "path": "utils/gs_helpers.py", "snippet": "def transformed_params2depthplussilhouette(params, w2c, transformed_pts):\n rendervar = {\n 'means3D': transformed_pts,\n 'colors_precomp': get_depth_and_silhouette(transformed_pts, w2c),\n 'rotations': F.normalize(params['unnorm_rotations']),\n 'opacities': torch.sigmoid(params['logit_opacities']),\n 'scales': torch.exp(torch.tile(params['log_scales'], (1, 3))),\n 'means2D': torch.zeros_like(params['means3D'], requires_grad=True, device=\"cuda\") + 0\n }\n return rendervar" }, { "identifier": "transform_to_frame", "path": "utils/gs_helpers.py", "snippet": "def transform_to_frame(params, time_idx, gaussians_grad, camera_grad):\n \"\"\"\n Function to transform Isotropic Gaussians from world frame to camera frame.\n \n Args:\n params: dict of parameters\n time_idx: time index to transform to\n gaussians_grad: enable gradients for Gaussians\n camera_grad: enable gradients for camera pose\n \n Returns:\n transformed_pts: Transformed Centers of Gaussians\n \"\"\"\n # Get 
Frame Camera Pose\n if camera_grad:\n cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx])\n cam_tran = params['cam_trans'][..., time_idx]\n else:\n cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx].detach())\n cam_tran = params['cam_trans'][..., time_idx].detach()\n rel_w2c = torch.eye(4).cuda().float()\n rel_w2c[:3, :3] = build_rotation(cam_rot)\n rel_w2c[:3, 3] = cam_tran\n\n # Get Centers and norm Rots of Gaussians in World Frame\n if gaussians_grad:\n pts = params['means3D']\n else:\n pts = params['means3D'].detach()\n \n # Transform Centers and Unnorm Rots of Gaussians to Camera Frame\n pts_ones = torch.ones(pts.shape[0], 1).cuda().float()\n pts4 = torch.cat((pts, pts_ones), dim=1)\n transformed_pts = (rel_w2c @ pts4.T).T[:, :3]\n\n return transformed_pts" }, { "identifier": "report_progress", "path": "utils/gs_helpers.py", "snippet": "def report_progress(params, data, i, progress_bar, iter_time_idx, sil_thres, every_i=1, qual_every_i=1, \n tracking=False, mapping=False, wandb_run=None, wandb_step=None, wandb_save_qual=False, online_time_idx=None):\n if i % every_i == 0 or i == 1:\n if wandb_run is not None:\n if tracking:\n stage = \"Tracking\"\n elif mapping:\n stage = \"Mapping\"\n else:\n stage = \"Current Frame Optimization\"\n\n # Initialize Render Variables\n rendervar = params2rendervar(params)\n depth_sil_rendervar = params2depthplussilhouette(params, data['w2c'])\n\n # Initialize Render Variables\n depth_sil, _, _, = Renderer(raster_settings=data['cam'])(**depth_sil_rendervar)\n rastered_depth = depth_sil[0, :, :].unsqueeze(0)\n valid_depth_mask = (data['depth'] > 0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n\n im, _, _, = Renderer(raster_settings=data['cam'])(**rendervar)\n if tracking:\n psnr = calc_psnr(im * presence_sil_mask, data['im'] * presence_sil_mask).mean()\n else:\n psnr = calc_psnr(im, data['im']).mean()\n\n if tracking:\n diff_depth_rmse = torch.sqrt((((rastered_depth - data['depth']) * presence_sil_mask) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n else:\n diff_depth_rmse = torch.sqrt(((rastered_depth - data['depth']) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n\n if not mapping:\n progress_bar.set_postfix({f\"Time-Step: {iter_time_idx} | Frame {data['id']} | PSNR: {psnr:.{7}} | RMSE\": f\"{rmse:.{7}}\"})\n progress_bar.update(every_i)\n else:\n progress_bar.set_postfix({f\"Time-Step: {online_time_idx} | Frame {data['id']} | PSNR: {psnr:.{7}} | RMSE\": f\"{rmse:.{7}}\"})\n progress_bar.update(every_i)\n \n if wandb_run is not None:\n wandb_run.log({f\"{stage} PSNR\": psnr, f\"{stage} RMSE\": rmse}, step=wandb_step)\n \n if wandb_save_qual and (i % qual_every_i == 0 or i == 1):\n # Silhouette Mask\n presence_sil_mask = presence_sil_mask.detach().cpu().numpy()\n\n # Log plot to wandb\n if not mapping:\n fig_title = f\"Time-Step: {iter_time_idx} | Iter: {i} | Frame: {data['id']}\"\n else:\n fig_title = f\"Time-Step: {online_time_idx} | Iter: {i} | Frame: {data['id']}\"\n plot_rgbd_silhouette(data['im'], data['depth'], im, rastered_depth, presence_sil_mask, diff_depth_rmse,\n psnr, rmse, fig_title, wandb_run=wandb_run, wandb_step=wandb_step, \n wandb_title=f\"{stage} Qual Viz\")" }, { "identifier": "eval", "path": "utils/gs_helpers.py", "snippet": "def eval(dataset, final_params, num_frames, eval_dir, sil_thres, mapping_iters, add_new_gaussians, 
wandb_run=None, wandb_save_qual=False):\n print(\"Evaluating Final Parameters ...\")\n psnr_list = []\n rmse_list = []\n lpips_list = []\n ssim_list = []\n plot_dir = os.path.join(eval_dir, \"plots\")\n os.makedirs(plot_dir, exist_ok=True)\n\n gt_w2c_list = []\n for time_idx in tqdm(range(num_frames)):\n # Get RGB-D Data & Camera Parameters\n color, depth, intrinsics, pose = dataset[time_idx]\n gt_w2c = torch.linalg.inv(pose)\n gt_w2c_list.append(gt_w2c)\n intrinsics = intrinsics[:3, :3]\n\n # Process RGB-D Data\n color = color.permute(2, 0, 1) / 255 # (H, W, C) -> (C, H, W)\n depth = depth.permute(2, 0, 1) # (H, W, C) -> (C, H, W)\n\n # Process Camera Parameters\n w2c = torch.linalg.inv(pose)\n if time_idx == 0:\n first_frame_w2c = w2c\n # Setup Camera\n cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), w2c.detach().cpu().numpy())\n \n # Define current frame data\n curr_data = {'cam': cam, 'im': color, 'depth': depth, 'id': time_idx, 'intrinsics': intrinsics, 'w2c': w2c}\n\n # Initialize Render Variables\n rendervar = params2rendervar(final_params)\n depth_sil_rendervar = params2depthplussilhouette(final_params, w2c)\n\n # Render Depth & Silhouette\n depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar)\n rastered_depth = depth_sil[0, :, :].unsqueeze(0)\n valid_depth_mask = (curr_data['depth'] > 0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n \n # Render RGB and Calculate PSNR\n im, radius, _, = Renderer(raster_settings=curr_data['cam'])(**rendervar)\n if mapping_iters==0 and not add_new_gaussians:\n weighted_im = im * presence_sil_mask\n weighted_gt_im = curr_data['im'] * presence_sil_mask\n psnr = calc_psnr(weighted_im, weighted_gt_im).mean()\n ssim = ms_ssim(weighted_im.unsqueeze(0).cpu(), weighted_gt_im.unsqueeze(0).cpu(), \n data_range=1.0, size_average=True)\n lpips_score = loss_fn_alex(torch.clamp(weighted_im.unsqueeze(0), 0.0, 1.0),\n torch.clamp(weighted_gt_im.unsqueeze(0), 0.0, 1.0)).item()\n else:\n psnr = calc_psnr(im, curr_data['im']).mean()\n ssim = ms_ssim(im.unsqueeze(0).cpu(), curr_data['im'].unsqueeze(0).cpu(), \n data_range=1.0, size_average=True)\n lpips_score = loss_fn_alex(torch.clamp(im.unsqueeze(0), 0.0, 1.0),\n torch.clamp(curr_data['im'].unsqueeze(0), 0.0, 1.0)).item()\n\n psnr_list.append(psnr.cpu().numpy())\n ssim_list.append(ssim.cpu().numpy())\n lpips_list.append(lpips_score)\n\n # Compute Depth RMSE\n if mapping_iters==0 and not add_new_gaussians:\n diff_depth_rmse = torch.sqrt((((rastered_depth - curr_data['depth']) * presence_sil_mask) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n else:\n diff_depth_rmse = torch.sqrt(((rastered_depth - curr_data['depth']) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n rmse_list.append(rmse.cpu().numpy())\n\n # Plot the Ground Truth and Rasterized RGB & Depth, along with Silhouette\n fig_title = \"Time Step: {}\".format(time_idx)\n plot_name = \"%04d\" % time_idx\n presence_sil_mask = presence_sil_mask.detach().cpu().numpy()\n if wandb_run is None:\n plot_rgbd_silhouette(color, depth, im, rastered_depth, presence_sil_mask, diff_depth_rmse,\n psnr, rmse, fig_title, plot_dir, \n plot_name=plot_name, save_plot=True)\n elif wandb_save_qual:\n plot_rgbd_silhouette(color, depth, im, rastered_depth, presence_sil_mask, diff_depth_rmse,\n psnr, rmse, fig_title, plot_dir, \n plot_name=plot_name, 
save_plot=True,\n wandb_run=wandb_run, wandb_step=None, \n wandb_title=\"Eval Qual Viz\")\n\n # Compute Average Metrics\n psnr_list = np.array(psnr_list)\n rmse_list = np.array(rmse_list)\n ssim_list = np.array(ssim_list)\n lpips_list = np.array(lpips_list)\n avg_psnr = psnr_list.mean()\n avg_rmse = rmse_list.mean()\n avg_ssim = ssim_list.mean()\n avg_lpips = lpips_list.mean()\n print(\"Average PSNR: {:.2f}\".format(avg_psnr))\n print(\"Average Depth RMSE: {:.2f}\".format(avg_rmse))\n print(\"Average MS-SSIM: {:.2f}\".format(avg_ssim))\n print(\"Average LPIPS: {:.2f}\".format(avg_lpips))\n\n if wandb_run is not None:\n wandb_run.log({\"Average PSNR\": avg_psnr, \"Average Depth RMSE\": avg_rmse, \"Average MS-SSIM\": avg_ssim, \"Average LPIPS\": avg_lpips})\n\n # # Save metric lists as text files\n # np.savetxt(os.path.join(eval_dir, \"psnr.txt\"), psnr_list)\n # np.savetxt(os.path.join(eval_dir, \"rmse.txt\"), rmse_list)\n # np.savetxt(os.path.join(eval_dir, \"ssim.txt\"), ssim_list)\n # np.savetxt(os.path.join(eval_dir, \"lpips.txt\"), lpips_list)\n\n # # Plot PSNR & RMSE as line plots\n # fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n # axs[0].plot(np.arange(num_frames), psnr_list)\n # axs[0].set_title(\"RGB PSNR\")\n # axs[0].set_xlabel(\"Time Step\")\n # axs[0].set_ylabel(\"PSNR\")\n # axs[1].plot(np.arange(num_frames), rmse_list)\n # axs[1].set_title(\"Depth RMSE\")\n # axs[1].set_xlabel(\"Time Step\")\n # axs[1].set_ylabel(\"RMSE\")\n # fig.suptitle(\"Average PSNR: {:.2f}, Average Depth RMSE: {:.2f}\".format(avg_psnr, avg_rmse), y=1.05, fontsize=16)\n # plt.savefig(os.path.join(eval_dir, \"metrics.png\"), bbox_inches='tight')\n # if wandb_run is not None:\n # wandb_run.log({\"Eval Metrics\": fig})\n # plt.close()" }, { "identifier": "l1_loss_v1", "path": "utils/gs_helpers.py", "snippet": "def l1_loss_v1(x, y):\n return torch.abs((x - y)).mean()" }, { "identifier": "matrix_to_quaternion", "path": "utils/gs_helpers.py", "snippet": "def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert rotations given as rotation matrices to quaternions.\n\n Args:\n matrix: Rotation matrices as tensor of shape (..., 3, 3).\n\n Returns:\n quaternions with real part first, as tensor of shape (..., 4).\n Source: https://pytorch3d.readthedocs.io/en/latest/_modules/pytorch3d/transforms/rotation_conversions.html#matrix_to_quaternion\n \"\"\"\n if matrix.size(-1) != 3 or matrix.size(-2) != 3:\n raise ValueError(f\"Invalid rotation matrix shape {matrix.shape}.\")\n\n batch_dim = matrix.shape[:-2]\n m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(\n matrix.reshape(batch_dim + (9,)), dim=-1\n )\n\n q_abs = _sqrt_positive_part(\n torch.stack(\n [\n 1.0 + m00 + m11 + m22,\n 1.0 + m00 - m11 - m22,\n 1.0 - m00 + m11 - m22,\n 1.0 - m00 - m11 + m22,\n ],\n dim=-1,\n )\n )\n\n # we produce the desired quaternion multiplied by each of r, i, j, k\n quat_by_rijk = torch.stack(\n [\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([m10 - m01, m20 + m02, m21 + 
m12, q_abs[..., 3] ** 2], dim=-1),\n ],\n dim=-2,\n )\n\n # We floor here at 0.1 but the exact level is not important; if q_abs is small,\n # the candidate won't be picked.\n flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)\n quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))\n\n # if not for numerical problems, quat_candidates[i] should be same (up to a sign),\n # forall i; we pick the best-conditioned one (with the largest denominator)\n\n return quat_candidates[\n F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :\n ].reshape(batch_dim + (4,))" }, { "identifier": "calc_ssim", "path": "utils/gs_external.py", "snippet": "def calc_ssim(img1, img2, window_size=11, size_average=True):\n channel = img1.size(-3)\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n return _ssim(img1, img2, window, window_size, channel, size_average)" }, { "identifier": "build_rotation", "path": "utils/gs_external.py", "snippet": "def build_rotation(q):\n norm = torch.sqrt(q[:, 0] * q[:, 0] + q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] + q[:, 3] * q[:, 3])\n q = q / norm[:, None]\n rot = torch.zeros((q.size(0), 3, 3), device='cuda')\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n rot[:, 0, 0] = 1 - 2 * (y * y + z * z)\n rot[:, 0, 1] = 2 * (x * y - r * z)\n rot[:, 0, 2] = 2 * (x * z + r * y)\n rot[:, 1, 0] = 2 * (x * y + r * z)\n rot[:, 1, 1] = 1 - 2 * (x * x + z * z)\n rot[:, 1, 2] = 2 * (y * z - r * x)\n rot[:, 2, 0] = 2 * (x * z - r * y)\n rot[:, 2, 1] = 2 * (y * z + r * x)\n rot[:, 2, 2] = 1 - 2 * (x * x + y * y)\n return rot" }, { "identifier": "densify", "path": "utils/gs_external.py", "snippet": "def densify(params, variables, optimizer, iter, densify_dict):\n if iter <= densify_dict['stop_after']:\n variables = accumulate_mean2d_gradient(variables)\n grad_thresh = densify_dict['grad_thresh']\n if (iter >= densify_dict['start_after']) and (iter % densify_dict['densify_every'] == 0):\n grads = variables['means2D_gradient_accum'] / variables['denom']\n grads[grads.isnan()] = 0.0\n to_clone = torch.logical_and(grads >= grad_thresh, (\n torch.max(torch.exp(params['log_scales']), dim=1).values <= 0.01 * variables['scene_radius']))\n new_params = {k: v[to_clone] for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n\n new_timestep_vars = torch.zeros(new_params['means3D'].shape[0], device=\"cuda\")\n new_timestep_vars = variables['timestep'][to_clone] \n variables['timestep'] = torch.cat((variables['timestep'], new_timestep_vars), dim=0)\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n\n padded_grad = torch.zeros(num_pts, device=\"cuda\")\n padded_grad[:grads.shape[0]] = grads\n to_split = torch.logical_and(padded_grad >= grad_thresh,\n torch.max(torch.exp(params['log_scales']), dim=1).values > 0.01 * variables[\n 'scene_radius'])\n n = densify_dict['num_to_split_into'] # number to split into\n new_params = {k: v[to_split].repeat(n, 1) for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n #track new variables for new formed points\n new_timestep_vars = torch.zeros(new_params['means3D'].shape[0], device=\"cuda\")\n new_timestep_vars = variables['timestep'][to_split].repeat(n)\n variables['timestep'] = torch.cat((variables['timestep'], new_timestep_vars), dim=0)\n\n stds = torch.exp(params['log_scales'])[to_split].repeat(n, 3)\n means = torch.zeros((stds.size(0), 3), 
device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = build_rotation(params['unnorm_rotations'][to_split]).repeat(n, 1, 1)\n new_params['means3D'] += torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1)\n new_params['log_scales'] = torch.log(torch.exp(new_params['log_scales']) / (0.8 * n))\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n \n variables['means2D_gradient_accum'] = torch.zeros(num_pts, device=\"cuda\")\n variables['denom'] = torch.zeros(num_pts, device=\"cuda\")\n variables['max_2D_radius'] = torch.zeros(num_pts, device=\"cuda\")\n\n to_remove = torch.cat((to_split, torch.zeros(n * to_split.sum(), dtype=torch.bool, device=\"cuda\")))\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n if iter == densify_dict['stop_after']:\n remove_threshold = densify_dict['final_removal_opacity_threshold']\n else:\n remove_threshold = densify_dict['removal_opacity_threshold']\n to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()\n if iter >= densify_dict['remove_big_after']:\n big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']\n to_remove = torch.logical_or(to_remove, big_points_ws)\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n torch.cuda.empty_cache()\n\n # Reset Opacities for all Gaussians (This is not desired for mapping on only current frame)\n if iter > 0 and iter % densify_dict['reset_opacities_every'] == 0 and densify_dict['reset_opacities']:\n new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}\n params = update_params_and_optimizer(new_params, params, optimizer)\n\n return params, variables" }, { "identifier": "get_expon_lr_func", "path": "utils/gs_external.py", "snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000\n):\n \"\"\"\n Copied from Plenoxels\n\n Continuous learning rate decay function. Adapted from JaxNeRF\n The returned rate is lr_init when step=0 and lr_final when step=max_steps, and\n is log-linearly interpolated elsewhere (equivalent to exponential decay).\n If lr_delay_steps>0 then the learning rate will be scaled by some smooth\n function of lr_delay_mult, such that the initial learning rate is\n lr_init*lr_delay_mult at the beginning of optimization but will be eased back\n to the normal learning rate when steps>lr_delay_steps.\n :param conf: config subtree 'lr' or similar\n :param max_steps: int, the number of steps during optimization.\n :return HoF which takes step as input\n \"\"\"\n\n def helper(step):\n if step < 0 or (lr_init == 0.0 and lr_final == 0.0):\n # Disable this parameter\n return 0.0\n if lr_delay_steps > 0:\n # A kind of reverse cosine decay.\n delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(\n 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)\n )\n else:\n delay_rate = 1.0\n t = np.clip(step / max_steps, 0, 1)\n log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)\n return delay_rate * log_lerp\n\n return helper" }, { "identifier": "update_learning_rate", "path": "utils/gs_external.py", "snippet": "def update_learning_rate(optimizer, means3D_scheduler, iteration):\n ''' Learning rate scheduling per step '''\n for param_group in optimizer.param_groups:\n if param_group[\"name\"] == \"means3D\":\n lr = means3D_scheduler(iteration)\n param_group['lr'] = lr\n return lr" } ]
import argparse import os import random import sys import shutil import cv2 import numpy as np import torch import wandb from importlib.machinery import SourceFileLoader from tqdm import tqdm from datasets.gradslam_datasets import ( load_dataset_config, ICLDataset, ReplicaDataset, AzureKinectDataset, ScannetDataset, Ai2thorDataset, Record3DDataset, RealsenseDataset, TUMDataset, ScannetPPDataset, NeRFCaptureDataset ) from utils.common_utils import seed_everything, save_seq_params, save_params, save_params_ckpt, save_seq_params_ckpt from utils.recon_helpers import setup_camera from utils.gs_helpers import ( params2rendervar, params2depthplussilhouette, transformed_params2depthplussilhouette, transform_to_frame, report_progress, eval, l1_loss_v1, matrix_to_quaternion ) from utils.gs_external import ( calc_ssim, build_rotation, densify, get_expon_lr_func, update_learning_rate ) from diff_gaussian_rasterization import GaussianRasterizer as Renderer
20,186
use_train_split=dataset_config["use_train_split"], ) num_frames = dataset_config["num_frames"] if num_frames == -1: num_frames = len(mapping_dataset) eval_num_frames = dataset_config["eval_num_frames"] if eval_num_frames == -1: eval_num_frames = len(eval_dataset) # Initialize Parameters, Optimizer & Canoncial Camera parameters ckpt_path = config["data"]["param_ckpt_path"] params, variables, optimizer, intrinsics, w2c, cam = initialize_first_timestep_from_ckpt(ckpt_path,mapping_dataset, num_frames, config['train']['lrs_mapping'], config['mean_sq_dist_method']) _, _, map_intrinsics, _ = mapping_dataset[0] # Load all RGBD frames - Mapping dataloader color_all_frames_map = [] depth_all_frames_map = [] gt_w2c_all_frames_map = [] gs_cams_all_frames_map = [] for time_idx in range(num_frames): color, depth, _, gt_pose = mapping_dataset[time_idx] # Process poses gt_w2c = torch.linalg.inv(gt_pose) # Process RGB-D Data color = color.permute(2, 0, 1) / 255 depth = depth.permute(2, 0, 1) color_all_frames_map.append(color) depth_all_frames_map.append(depth) gt_w2c_all_frames_map.append(gt_w2c) # Setup Gaussian Splatting Camera gs_cam = setup_camera(color.shape[2], color.shape[1], map_intrinsics.cpu().numpy(), gt_w2c.detach().cpu().numpy()) gs_cams_all_frames_map.append(gs_cam) # Iterate over Scan for time_idx in tqdm(range(num_frames)): # Optimization Iterations num_iters_mapping = config['train']['num_iters_mapping'] # Initialize current frame data iter_time_idx = time_idx color = color_all_frames_map[iter_time_idx] depth = depth_all_frames_map[iter_time_idx] curr_gt_w2c = gt_w2c_all_frames_map[:iter_time_idx+1] curr_data = {'cam': cam, 'im': color, 'depth': depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette # if time_idx > 0: # params, variables = add_new_gaussians(params, variables, curr_data, # config['train']['sil_thres'], time_idx, # config['mean_sq_dist_method']) post_num_pts = params['means3D'].shape[0] if config['use_wandb']: wandb_run.log({"Init/Number of Gaussians": post_num_pts, "Init/step": wandb_time_step}) # Reset Optimizer & Learning Rates for Full Map Optimization optimizer = initialize_optimizer(params, config['train']['lrs_mapping']) means3D_scheduler = get_expon_lr_func(lr_init=config['train']['lrs_mapping']['means3D'], lr_final=config['train']['lrs_mapping_means3D_final'], lr_delay_mult=config['train']['lr_delay_mult'], max_steps=config['train']['num_iters_mapping']) # Mapping if (time_idx + 1) == num_frames: if num_iters_mapping > 0: progress_bar = tqdm(range(num_iters_mapping), desc=f"Mapping Time Step: {time_idx}") for iter in range(num_iters_mapping): # Update Learning Rates for means3D updated_lr = update_learning_rate(optimizer, means3D_scheduler, iter+1) if config['use_wandb']: wandb_run.log({"Learning Rate - Means3D": updated_lr}) # Randomly select a frame until current time step iter_time_idx = random.randint(0, time_idx) # Initialize Data for selected frame iter_color = color_all_frames_map[iter_time_idx] iter_depth = depth_all_frames_map[iter_time_idx] iter_gt_w2c = gt_w2c_all_frames_map[:iter_time_idx+1] iter_gs_cam = gs_cams_all_frames_map[iter_time_idx] iter_data = {'cam': iter_gs_cam, 'im': iter_color, 'depth': iter_depth, 'id': iter_time_idx, 'intrinsics': map_intrinsics, 'w2c': gt_w2c_all_frames_map[iter_time_idx], 'iter_gt_w2c_list': iter_gt_w2c} # Loss for current frame loss, variables, losses = get_loss_gs(params, iter_data, variables, 
config['train']['loss_weights']) # Backprop loss.backward() with torch.no_grad(): # Gaussian-Splatting's Gradient-based Densification if config['train']['use_gaussian_splatting_densification']: params, variables = densify(params, variables, optimizer, iter, config['train']['densify_dict']) if config['use_wandb']: wandb_run.log({"Number of Gaussians - Densification": params['means3D'].shape[0]}) # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) # Report Progress if config['report_iter_progress']: if config['use_wandb']: report_progress(params, iter_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['train']['sil_thres'], wandb_run=wandb_run, wandb_step=wandb_step, wandb_save_qual=config['wandb']['save_qual'], mapping=True, online_time_idx=time_idx) else: report_progress(params, iter_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['train']['sil_thres'], mapping=True, online_time_idx=time_idx) else: progress_bar.update(1) # Eval Params at 7K Iterations if (iter + 1) == 7000: print("Evaluating Params at 7K Iterations") eval_params = convert_params_to_store(params) output_dir = os.path.join(config["workdir"], config["run_name"]) eval_dir = os.path.join(output_dir, "eval_7k") os.makedirs(eval_dir, exist_ok=True) if config['use_wandb']:
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) print("System Paths:") for p in sys.path: print(p) def get_dataset(config_dict, basedir, sequence, **kwargs): if config_dict["dataset_name"].lower() in ["icl"]: return ICLDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["replica"]: return ReplicaDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["azure", "azurekinect"]: return AzureKinectDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["scannet"]: return ScannetDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["ai2thor"]: return Ai2thorDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["record3d"]: return Record3DDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["realsense"]: return RealsenseDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["tum"]: return TUMDataset(config_dict, basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["scannetpp"]: return ScannetPPDataset(basedir, sequence, **kwargs) elif config_dict["dataset_name"].lower() in ["nerfcapture"]: return NeRFCaptureDataset(basedir, sequence, **kwargs) else: raise ValueError(f"Unknown dataset name {config_dict['dataset_name']}") def get_pointcloud(color, depth, intrinsics, w2c, transform_pts=True, mask=None, compute_mean_sq_dist=False, mean_sq_dist_method="projective"): width, height = color.shape[2], color.shape[1] CX = intrinsics[0][2] CY = intrinsics[1][2] FX = intrinsics[0][0] FY = intrinsics[1][1] # Compute indices of pixels x_grid, y_grid = torch.meshgrid(torch.arange(width).cuda().float(), torch.arange(height).cuda().float(), indexing='xy') xx = (x_grid - CX)/FX yy = (y_grid - CY)/FY xx = xx.reshape(-1) yy = yy.reshape(-1) depth_z = depth[0].reshape(-1) # Initialize point cloud pts_cam = torch.stack((xx * depth_z, yy * depth_z, depth_z), dim=-1) if transform_pts: pix_ones = torch.ones(height * width, 1).cuda().float() pts4 = torch.cat((pts_cam, pix_ones), dim=1) c2w = torch.inverse(w2c) pts = (c2w @ pts4.T).T[:, :3] else: pts = pts_cam # Compute mean squared distance for initializing the scale of the Gaussians if compute_mean_sq_dist: if mean_sq_dist_method == "projective": # Projective Geometry (this is fast, farther -> larger radius) scale_gaussian = depth_z / ((FX + FY)/2) mean3_sq_dist = scale_gaussian**2 else: raise ValueError(f"Unknown mean_sq_dist_method: {mean_sq_dist_method}") # Colorize point cloud cols = torch.permute(color, (1, 2, 0)).reshape(-1, 3) # (C, H, W) -> (H, W, C) -> (H * W, C) point_cld = torch.cat((pts, cols), -1) # Select points based on mask if mask is not None: point_cld = point_cld[mask] if compute_mean_sq_dist: mean3_sq_dist = mean3_sq_dist[mask] if compute_mean_sq_dist: return point_cld, mean3_sq_dist else: return point_cld def initialize_params(init_pt_cld, num_frames, mean3_sq_dist): num_pts = init_pt_cld.shape[0] means3D = init_pt_cld[:, :3] # [num_gaussians, 3] unnorm_rots = np.tile([1, 0, 0, 0], (num_pts, 1)) # [num_gaussians, 3] logit_opacities = torch.zeros((num_pts, 1), dtype=torch.float, device="cuda") params = { 'means3D': means3D, 'rgb_colors': init_pt_cld[:, 3:6], 'unnorm_rotations': unnorm_rots, 'logit_opacities': logit_opacities, 'log_scales': torch.tile(torch.log(torch.sqrt(mean3_sq_dist))[..., None], 
(1, 1)), } # Initialize a single gaussian trajectory to model the camera poses relative to the first frame cam_rots = np.tile([1, 0, 0, 0], (1, 1)) cam_rots = np.tile(cam_rots[:, :, None], (1, 1, num_frames)) params['cam_unnorm_rots'] = cam_rots params['cam_trans'] = np.zeros((1, 3, num_frames)) for k, v in params.items(): # Check if value is already a torch tensor if not isinstance(v, torch.Tensor): params[k] = torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True)) else: params[k] = torch.nn.Parameter(v.cuda().float().contiguous().requires_grad_(True)) variables = {'max_2D_radius': torch.zeros(params['means3D'].shape[0]).cuda().float(), 'means2D_gradient_accum': torch.zeros(params['means3D'].shape[0]).cuda().float(), 'denom': torch.zeros(params['means3D'].shape[0]).cuda().float()} return params, variables def initialize_optimizer(params, lrs_dict): lrs = lrs_dict param_groups = [{'params': [v], 'name': k, 'lr': lrs[k]} for k, v in params.items()] return torch.optim.Adam(param_groups, lr=0.0, eps=1e-15) def initialize_first_timestep_from_ckpt(ckpt_path,dataset, num_frames, lrs_dict, mean_sq_dist_method): # Get RGB-D Data & Camera Parameters color, depth, intrinsics, pose = dataset[0] # Process RGB-D Data color = color.permute(2, 0, 1) / 255 # (H, W, C) -> (C, H, W) depth = depth.permute(2, 0, 1) # (H, W, C) -> (C, H, W) # Process Camera Parameters intrinsics = intrinsics[:3, :3] w2c = torch.linalg.inv(pose) # Setup Camera cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), w2c.detach().cpu().numpy()) # Get Initial Point Cloud (PyTorch CUDA Tensor) mask = (depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) # Initialize Parameters & Optimizer from Checkpoint # Load checkpoint print(f"Loading Params") params = dict(np.load(ckpt_path, allow_pickle=True)) variables = {} for k in ['intrinsics', 'w2c', 'org_width', 'org_height', 'gt_w2c_all_frames']: # for k in ['timestep','intrinsics', 'w2c', 'org_width', 'org_height', 'gt_w2c_all_frames']: params.pop(k) print(params.keys()) params = {k: torch.tensor(params[k]).cuda().float().requires_grad_(True) for k in params.keys()} variables['max_2D_radius'] = torch.zeros(params['means3D'].shape[0]).cuda().float() variables['means2D_gradient_accum'] = torch.zeros(params['means3D'].shape[0]).cuda().float() variables['denom'] = torch.zeros(params['means3D'].shape[0]).cuda().float() # variables['timestep'] = torch.zeros(params['means3D'].shape[0]).cuda().float() variables['timestep'] = torch.tensor(params['timestep']).cuda().float() params.pop('timestep') optimizer = initialize_optimizer(params, lrs_dict) # Initialize an estimate of scene radius for Gaussian-Splatting Densification variables['scene_radius'] = torch.max(depth)/2.0 return params, variables, optimizer, intrinsics, w2c, cam def get_loss_gs(params, curr_data, variables, loss_weights): # Initialize Loss Dictionary losses = {} # Initialize Render Variables rendervar = params2rendervar(params) depth_sil_rendervar = params2depthplussilhouette(params, curr_data['w2c']) # RGB Rendering rendervar['means2D'].retain_grad() im, radius, _, = Renderer(raster_settings=curr_data['cam'])(**rendervar) variables['means2D'] = rendervar['means2D'] # Gradient only accum from colour render for densification # Depth & Silhouette Rendering depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar) depth = depth_sil[0, :, :].unsqueeze(0) silhouette = depth_sil[1, :, :] # Get invalid Depth Mask valid_depth_mask = 
(curr_data['depth'] != 0.0) depth = depth * valid_depth_mask # RGB Loss losses['im'] = 0.8 * l1_loss_v1(im, curr_data['im']) + 0.2 * (1.0 - calc_ssim(im, curr_data['im'])) # Depth Loss losses['depth'] = l1_loss_v1(depth, curr_data['depth']) weighted_losses = {k: v * loss_weights[k] for k, v in losses.items()} loss = sum(weighted_losses.values()) seen = radius > 0 variables['max_2D_radius'][seen] = torch.max(radius[seen], variables['max_2D_radius'][seen]) variables['seen'] = seen weighted_losses['loss'] = loss return loss, variables, weighted_losses def initialize_new_params(new_pt_cld, mean3_sq_dist): num_pts = new_pt_cld.shape[0] means3D = new_pt_cld[:, :3] # [num_gaussians, 3] unnorm_rots = np.tile([1, 0, 0, 0], (num_pts, 1)) # [num_gaussians, 3] logit_opacities = torch.zeros((num_pts, 1), dtype=torch.float, device="cuda") params = { 'means3D': means3D, 'rgb_colors': new_pt_cld[:, 3:6], 'unnorm_rotations': unnorm_rots, 'logit_opacities': logit_opacities, 'log_scales': torch.tile(torch.log(torch.sqrt(mean3_sq_dist))[..., None], (1, 1)), } for k, v in params.items(): # Check if value is already a torch tensor if not isinstance(v, torch.Tensor): params[k] = torch.nn.Parameter(torch.tensor(v).cuda().float().contiguous().requires_grad_(True)) else: params[k] = torch.nn.Parameter(v.cuda().float().contiguous().requires_grad_(True)) return params def infill_depth(depth, inpaint_radius=1): """ Function to infill Depth for invalid regions Input: depth: Depth Image (Numpy) radius: Radius of the circular neighborhood for infilling Output: depth: Depth Image with invalid regions infilled (Numpy) """ invalid_mask = (depth == 0) invalid_mask = invalid_mask.astype(np.uint8) filled_depth = cv2.inpaint(depth, invalid_mask, inpaint_radius, cv2.INPAINT_NS) return filled_depth def add_new_gaussians(params, variables, curr_data, sil_thres, time_idx, mean_sq_dist_method): # Silhouette Rendering transformed_pts = transform_to_frame(params, time_idx, gaussians_grad=False, camera_grad=False) depth_sil_rendervar = transformed_params2depthplussilhouette(params, curr_data['w2c'], transformed_pts) depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar) silhouette = depth_sil[1, :, :] non_presence_sil_mask = (silhouette < sil_thres) # Check for new foreground objects by using GT depth gt_depth = curr_data['depth'][0, :, :] render_depth = depth_sil[0, :, :] depth_error = torch.abs(gt_depth - render_depth) * (gt_depth > 0) non_presence_depth_mask = (render_depth > gt_depth) * (depth_error > 50*depth_error.median()) # Determine non-presence mask non_presence_mask = non_presence_sil_mask | non_presence_depth_mask # Infill Depth for invalid regions of GT Depth infilled_gt_depth = infill_depth(curr_data['depth'][0, :, :].detach().cpu().numpy()) infilled_gt_depth = torch.tensor(infilled_gt_depth).cuda().float().unsqueeze(0) # Flatten mask non_presence_mask = non_presence_mask.reshape(-1) # Get the new frame Gaussians based on the Silhouette if torch.sum(non_presence_mask) > 0: # Get the new pointcloud in the world frame curr_cam_rot = torch.nn.functional.normalize(params['cam_unnorm_rots'][..., time_idx].detach()) curr_cam_tran = params['cam_trans'][..., time_idx].detach() curr_w2c = torch.eye(4).cuda().float() curr_w2c[:3, :3] = build_rotation(curr_cam_rot) curr_w2c[:3, 3] = curr_cam_tran valid_depth_mask = (infilled_gt_depth > 0) non_presence_mask = non_presence_mask & valid_depth_mask.reshape(-1) new_pt_cld, mean3_sq_dist = get_pointcloud(curr_data['im'], infilled_gt_depth, 
curr_data['intrinsics'], curr_w2c, mask=non_presence_mask, compute_mean_sq_dist=True, mean_sq_dist_method=mean_sq_dist_method) new_params = initialize_new_params(new_pt_cld, mean3_sq_dist) for k, v in new_params.items(): params[k] = torch.nn.Parameter(torch.cat((params[k], v), dim=0).requires_grad_(True)) num_pts = params['means3D'].shape[0] variables['means2D_gradient_accum'] = torch.zeros(num_pts, device="cuda").float() variables['denom'] = torch.zeros(num_pts, device="cuda").float() variables['max_2D_radius'] = torch.zeros(num_pts, device="cuda").float() new_timestep = time_idx*torch.ones(new_pt_cld.shape[0],device="cuda").float() variables['timestep'] = torch.cat((variables['timestep'],new_timestep),dim=0) return params, variables def convert_params_to_store(params): params_to_store = {} for k, v in params.items(): if isinstance(v, torch.Tensor): params_to_store[k] = v.detach().clone() else: params_to_store[k] = v return params_to_store def rgbd_slam(config: dict): # Print Config print("Loaded Config:") print(f"{config}") # Init WandB if config['use_wandb']: wandb_step = 0 wandb_time_step = 0 wandb_run = wandb.init(project=config['wandb']['project'], entity=config['wandb']['entity'], group=config['wandb']['group'], name=config['wandb']['name'], config=config) wandb_run.define_metric("Mapping_Iters") wandb_run.define_metric("Number of Gaussians - Densification", step_metric="Mapping_Iters") wandb_run.define_metric("Learning Rate - Means3D", step_metric="Mapping_Iters") # Get Device device = torch.device(config["primary_device"]) # Load Dataset print("Loading Dataset ...") dataset_config = config["data"] if "gradslam_data_cfg" not in dataset_config: gradslam_data_cfg = {} gradslam_data_cfg["dataset_name"] = dataset_config["dataset_name"] else: gradslam_data_cfg = load_dataset_config(dataset_config["gradslam_data_cfg"]) if "ignore_bad" not in dataset_config: dataset_config["ignore_bad"] = False if "use_train_split" not in dataset_config: dataset_config["use_train_split"] = True # Poses are relative to the first frame mapping_dataset = get_dataset( config_dict=gradslam_data_cfg, basedir=dataset_config["basedir"], sequence=os.path.basename(dataset_config["sequence"]), start=dataset_config["start"], end=dataset_config["end"], stride=dataset_config["stride"], desired_height=dataset_config["desired_image_height"], desired_width=dataset_config["desired_image_width"], device=device, relative_pose=True, ignore_bad=dataset_config["ignore_bad"], use_train_split=dataset_config["use_train_split"], ) eval_dataset = get_dataset( config_dict=gradslam_data_cfg, basedir=dataset_config["basedir"], sequence=os.path.basename(dataset_config["sequence"]), start=dataset_config["start"], end=dataset_config["end"], stride=dataset_config["eval_stride"], desired_height=dataset_config["desired_image_height"], desired_width=dataset_config["desired_image_width"], device=device, relative_pose=True, ignore_bad=dataset_config["ignore_bad"], use_train_split=dataset_config["use_train_split"], ) num_frames = dataset_config["num_frames"] if num_frames == -1: num_frames = len(mapping_dataset) eval_num_frames = dataset_config["eval_num_frames"] if eval_num_frames == -1: eval_num_frames = len(eval_dataset) # Initialize Parameters, Optimizer & Canoncial Camera parameters ckpt_path = config["data"]["param_ckpt_path"] params, variables, optimizer, intrinsics, w2c, cam = initialize_first_timestep_from_ckpt(ckpt_path,mapping_dataset, num_frames, config['train']['lrs_mapping'], config['mean_sq_dist_method']) _, _, map_intrinsics, _ = 
mapping_dataset[0] # Load all RGBD frames - Mapping dataloader color_all_frames_map = [] depth_all_frames_map = [] gt_w2c_all_frames_map = [] gs_cams_all_frames_map = [] for time_idx in range(num_frames): color, depth, _, gt_pose = mapping_dataset[time_idx] # Process poses gt_w2c = torch.linalg.inv(gt_pose) # Process RGB-D Data color = color.permute(2, 0, 1) / 255 depth = depth.permute(2, 0, 1) color_all_frames_map.append(color) depth_all_frames_map.append(depth) gt_w2c_all_frames_map.append(gt_w2c) # Setup Gaussian Splatting Camera gs_cam = setup_camera(color.shape[2], color.shape[1], map_intrinsics.cpu().numpy(), gt_w2c.detach().cpu().numpy()) gs_cams_all_frames_map.append(gs_cam) # Iterate over Scan for time_idx in tqdm(range(num_frames)): # Optimization Iterations num_iters_mapping = config['train']['num_iters_mapping'] # Initialize current frame data iter_time_idx = time_idx color = color_all_frames_map[iter_time_idx] depth = depth_all_frames_map[iter_time_idx] curr_gt_w2c = gt_w2c_all_frames_map[:iter_time_idx+1] curr_data = {'cam': cam, 'im': color, 'depth': depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette # if time_idx > 0: # params, variables = add_new_gaussians(params, variables, curr_data, # config['train']['sil_thres'], time_idx, # config['mean_sq_dist_method']) post_num_pts = params['means3D'].shape[0] if config['use_wandb']: wandb_run.log({"Init/Number of Gaussians": post_num_pts, "Init/step": wandb_time_step}) # Reset Optimizer & Learning Rates for Full Map Optimization optimizer = initialize_optimizer(params, config['train']['lrs_mapping']) means3D_scheduler = get_expon_lr_func(lr_init=config['train']['lrs_mapping']['means3D'], lr_final=config['train']['lrs_mapping_means3D_final'], lr_delay_mult=config['train']['lr_delay_mult'], max_steps=config['train']['num_iters_mapping']) # Mapping if (time_idx + 1) == num_frames: if num_iters_mapping > 0: progress_bar = tqdm(range(num_iters_mapping), desc=f"Mapping Time Step: {time_idx}") for iter in range(num_iters_mapping): # Update Learning Rates for means3D updated_lr = update_learning_rate(optimizer, means3D_scheduler, iter+1) if config['use_wandb']: wandb_run.log({"Learning Rate - Means3D": updated_lr}) # Randomly select a frame until current time step iter_time_idx = random.randint(0, time_idx) # Initialize Data for selected frame iter_color = color_all_frames_map[iter_time_idx] iter_depth = depth_all_frames_map[iter_time_idx] iter_gt_w2c = gt_w2c_all_frames_map[:iter_time_idx+1] iter_gs_cam = gs_cams_all_frames_map[iter_time_idx] iter_data = {'cam': iter_gs_cam, 'im': iter_color, 'depth': iter_depth, 'id': iter_time_idx, 'intrinsics': map_intrinsics, 'w2c': gt_w2c_all_frames_map[iter_time_idx], 'iter_gt_w2c_list': iter_gt_w2c} # Loss for current frame loss, variables, losses = get_loss_gs(params, iter_data, variables, config['train']['loss_weights']) # Backprop loss.backward() with torch.no_grad(): # Gaussian-Splatting's Gradient-based Densification if config['train']['use_gaussian_splatting_densification']: params, variables = densify(params, variables, optimizer, iter, config['train']['densify_dict']) if config['use_wandb']: wandb_run.log({"Number of Gaussians - Densification": params['means3D'].shape[0]}) # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) # Report Progress if config['report_iter_progress']: if config['use_wandb']: report_progress(params, iter_data, iter+1, progress_bar, 
iter_time_idx, sil_thres=config['train']['sil_thres'], wandb_run=wandb_run, wandb_step=wandb_step, wandb_save_qual=config['wandb']['save_qual'], mapping=True, online_time_idx=time_idx) else: report_progress(params, iter_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['train']['sil_thres'], mapping=True, online_time_idx=time_idx) else: progress_bar.update(1) # Eval Params at 7K Iterations if (iter + 1) == 7000: print("Evaluating Params at 7K Iterations") eval_params = convert_params_to_store(params) output_dir = os.path.join(config["workdir"], config["run_name"]) eval_dir = os.path.join(output_dir, "eval_7k") os.makedirs(eval_dir, exist_ok=True) if config['use_wandb']:
eval(eval_dataset, eval_params, eval_num_frames, eval_dir, sil_thres=config['train']['sil_thres'],
22
2023-11-30 20:26:47+00:00
24k
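The mapping loop in the record above drives the `means3D` learning rate through `get_expon_lr_func` and `update_learning_rate`: a log-linear (exponential) decay from `lr_init` at step 0 to `lr_final` at `max_steps`, with an optional sine ease-in over `lr_delay_steps`. A minimal NumPy sketch of that schedule follows; the numeric values in the usage lines are illustrative and not taken from the record's config.

import numpy as np

def expon_lr(step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1.0):
    # Log-linear interpolation between lr_init (step 0) and lr_final (max_steps),
    # optionally eased in over lr_delay_steps, as in `get_expon_lr_func` above.
    if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
        return 0.0  # disabled parameter group
    if lr_delay_steps > 0:
        delay = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
            0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1))
    else:
        delay = 1.0
    t = np.clip(step / max_steps, 0, 1)
    return delay * np.exp((1 - t) * np.log(lr_init) + t * np.log(lr_final))

# Illustrative values only: decays from 1.6e-4 to 1.6e-6 over 30k steps.
for s in (0, 15000, 30000):
    print(s, expon_lr(s, lr_init=1.6e-4, lr_final=1.6e-6, max_steps=30000))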
zhyever/PatchFusion
zoedepth/models/zoedepth_custom/patchfusion.py
[ { "identifier": "DepthModel", "path": "zoedepth/models/depth_model.py", "snippet": "class DepthModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.device = 'cpu'\n \n def to(self, device) -> nn.Module:\n self.device = device\n return super().to(device)\n \n def forward(self, x, *args, **kwargs):\n raise NotImplementedError\n \n def _infer(self, x: torch.Tensor):\n \"\"\"\n Inference interface for the model\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n return self(x)['metric_depth']\n \n def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode=\"reflect\", **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model with padding augmentation\n Padding augmentation fixes the boundary artifacts in the output depth map.\n Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image.\n This augmentation pads the input image and crops the prediction back to the original size / view.\n\n Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to pad the input or not. Defaults to True.\n fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.\n fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.\n upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.\n padding_mode (str, optional): padding mode. Defaults to \"reflect\".\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n # assert x is nchw and c = 3\n assert x.dim() == 4, \"x must be 4 dimensional, got {}\".format(x.dim())\n assert x.shape[1] == 3, \"x must have 3 channels, got {}\".format(x.shape[1])\n\n if pad_input:\n assert fh > 0 or fw > 0, \"atlease one of fh and fw must be greater than 0\"\n pad_h = int(np.sqrt(x.shape[2]/2) * fh)\n pad_w = int(np.sqrt(x.shape[3]/2) * fw)\n padding = [pad_w, pad_w]\n if pad_h > 0:\n padding += [pad_h, pad_h]\n \n x = F.pad(x, padding, mode=padding_mode, **kwargs)\n out = self._infer(x)\n if out.shape[-2:] != x.shape[-2:]:\n out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)\n if pad_input:\n # crop to the original size, handling the case where pad_h and pad_w is 0\n if pad_h > 0:\n out = out[:, :, pad_h:-pad_h,:]\n if pad_w > 0:\n out = out[:, :, :, pad_w:-pad_w]\n return out\n \n def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model with horizontal flip augmentation\n Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to use padding augmentation. 
Defaults to True.\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n # infer with horizontal flip and average\n out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)\n out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)\n out = (out + torch.flip(out_flip, dims=[3])) / 2\n return out\n \n def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor:\n \"\"\"\n Inference interface for the model\n Args:\n x (torch.Tensor): input tensor of shape (b, c, h, w)\n pad_input (bool, optional): whether to use padding augmentation. Defaults to True.\n with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.\n Returns:\n torch.Tensor: output tensor of shape (b, 1, h, w)\n \"\"\"\n if with_flip_aug:\n return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)\n else:\n return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)\n \n @torch.no_grad()\n def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str=\"numpy\", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]:\n \"\"\"\n Inference interface for the model for PIL image\n Args:\n pil_img (PIL.Image.Image): input PIL image\n pad_input (bool, optional): whether to use padding augmentation. Defaults to True.\n with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.\n output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to \"numpy\".\n \"\"\"\n x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device)\n out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs)\n if output_type == \"numpy\":\n return out_tensor.squeeze().cpu().numpy()\n elif output_type == \"pil\":\n # uint16 is required for depth pil image\n out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16)\n return Image.fromarray(out_16bit_numpy)\n elif output_type == \"tensor\":\n return out_tensor.squeeze().cpu()\n else:\n raise ValueError(f\"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'\")" }, { "identifier": "MidasCore", "path": "zoedepth/models/base_models/midas.py", "snippet": "class MidasCore(nn.Module):\n def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True,\n img_size=384, **kwargs):\n \"\"\"Midas Base model used for multi-scale feature extraction.\n\n Args:\n midas (torch.nn.Module): Midas model.\n trainable (bool, optional): Train midas model. Defaults to False.\n fetch_features (bool, optional): Extract multi-scale features. Defaults to True.\n layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1').\n freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False.\n keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True.\n img_size (int, tuple, optional): Input resolution. 
Defaults to 384.\n \"\"\"\n super().__init__()\n self.core = midas\n self.output_channels = None\n self.core_out = {}\n self.trainable = trainable\n self.fetch_features = fetch_features\n # midas.scratch.output_conv = nn.Identity()\n self.handles = []\n # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1']\n self.layer_names = layer_names\n\n self.set_trainable(trainable)\n self.set_fetch_features(fetch_features)\n\n self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio,\n img_size=img_size, do_resize=kwargs.get('do_resize', True))\n\n if freeze_bn:\n self.freeze_bn()\n\n def set_trainable(self, trainable):\n self.trainable = trainable\n if trainable:\n self.unfreeze()\n else:\n self.freeze()\n return self\n\n def set_fetch_features(self, fetch_features):\n self.fetch_features = fetch_features\n if fetch_features:\n if len(self.handles) == 0:\n self.attach_hooks(self.core)\n else:\n self.remove_hooks()\n return self\n\n def freeze(self):\n for p in self.parameters():\n p.requires_grad = False\n self.trainable = False\n return self\n\n def unfreeze(self):\n for p in self.parameters():\n p.requires_grad = True\n self.trainable = True\n return self\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n return self\n\n def forward(self, x, denorm=False, return_rel_depth=False):\n with torch.no_grad():\n if denorm:\n x = denormalize(x)\n x = self.prep(x)\n # print(\"Shape after prep: \", x.shape)\n\n with torch.set_grad_enabled(self.trainable):\n\n # print(\"Input size to Midascore\", x.shape)\n rel_depth = self.core(x)\n # print(\"Output from midas shape\", rel_depth.shape)\n if not self.fetch_features:\n return rel_depth\n out = [self.core_out[k] for k in self.layer_names]\n\n if return_rel_depth:\n return rel_depth, out\n return out\n\n def get_rel_pos_params(self):\n for name, p in self.core.pretrained.named_parameters():\n if \"relative_position\" in name:\n yield p\n\n def get_enc_params_except_rel_pos(self):\n for name, p in self.core.pretrained.named_parameters():\n if \"relative_position\" not in name:\n yield p\n\n def freeze_encoder(self, freeze_rel_pos=False):\n if freeze_rel_pos:\n for p in self.core.pretrained.parameters():\n p.requires_grad = False\n else:\n for p in self.get_enc_params_except_rel_pos():\n p.requires_grad = False\n return self\n\n def attach_hooks(self, midas):\n if len(self.handles) > 0:\n self.remove_hooks()\n if \"out_conv\" in self.layer_names:\n self.handles.append(list(midas.scratch.output_conv.children())[\n 3].register_forward_hook(get_activation(\"out_conv\", self.core_out)))\n if \"r4\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet4.register_forward_hook(\n get_activation(\"r4\", self.core_out)))\n if \"r3\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet3.register_forward_hook(\n get_activation(\"r3\", self.core_out)))\n if \"r2\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet2.register_forward_hook(\n get_activation(\"r2\", self.core_out)))\n if \"r1\" in self.layer_names:\n self.handles.append(midas.scratch.refinenet1.register_forward_hook(\n get_activation(\"r1\", self.core_out)))\n if \"l4_rn\" in self.layer_names:\n self.handles.append(midas.scratch.layer4_rn.register_forward_hook(\n get_activation(\"l4_rn\", self.core_out)))\n\n return self\n\n def remove_hooks(self):\n for h in self.handles:\n h.remove()\n return self\n\n def __del__(self):\n self.remove_hooks()\n\n def set_output_channels(self, model_type):\n 
self.output_channels = MIDAS_SETTINGS[model_type]\n\n @staticmethod\n def build(midas_model_type=\"DPT_BEiT_L_384\", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs):\n if midas_model_type not in MIDAS_SETTINGS:\n raise ValueError(\n f\"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}\")\n if \"img_size\" in kwargs:\n kwargs = MidasCore.parse_img_size(kwargs)\n img_size = kwargs.pop(\"img_size\", [384, 384])\n print(\"img_size\", img_size)\n midas = torch.hub.load(\"intel-isl/MiDaS\", midas_model_type,\n pretrained=use_pretrained_midas, force_reload=force_reload)\n kwargs.update({'keep_aspect_ratio': force_keep_ar})\n midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features,\n freeze_bn=freeze_bn, img_size=img_size, **kwargs)\n midas_core.set_output_channels(midas_model_type)\n return midas_core\n\n @staticmethod\n def build_from_config(config):\n return MidasCore.build(**config)\n\n @staticmethod\n def parse_img_size(config):\n assert 'img_size' in config\n if isinstance(config['img_size'], str):\n assert \",\" in config['img_size'], \"img_size should be a string with comma separated img_size=H,W\"\n config['img_size'] = list(map(int, config['img_size'].split(\",\")))\n assert len(\n config['img_size']) == 2, \"img_size should be a string with comma separated img_size=H,W\"\n elif isinstance(config['img_size'], int):\n config['img_size'] = [config['img_size'], config['img_size']]\n else:\n assert isinstance(config['img_size'], list) and len(\n config['img_size']) == 2, \"img_size should be a list of H,W\"\n return config" }, { "identifier": "AttractorLayer", "path": "zoedepth/models/layers/attractor.py", "snippet": "class AttractorLayer(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n eps = 1e-3\n A = A + eps\n n, c, h, w = A.shape\n A = A.view(n, self.n_attractors, 2, h, w)\n A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w\n A_normed = A[:, :, 0, ...] 
# n, na, h, w\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(dist(A_normed.unsqueeze(\n 2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n # .shape N, nbins, h, w\n delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers)\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = (self.max_depth - self.min_depth) * \\\n b_new_centers + self.min_depth\n B_centers, _ = torch.sort(B_centers, dim=1)\n B_centers = torch.clip(B_centers, self.min_depth, self.max_depth)\n return b_new_centers, B_centers" }, { "identifier": "AttractorLayerUnnormed", "path": "zoedepth/models/layers/attractor.py", "snippet": "class AttractorLayerUnnormed(nn.Module):\n def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,\n alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):\n \"\"\"\n Attractor layer for bin centers. Bin centers are unbounded\n \"\"\"\n super().__init__()\n\n self.n_attractors = n_attractors\n self.n_bins = n_bins\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.alpha = alpha\n self.gamma = gamma\n self.kind = kind\n self.attractor_type = attractor_type\n self.memory_efficient = memory_efficient\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):\n \"\"\"\n Args:\n x (torch.Tensor) : feature block; shape - n, c, h, w\n b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w\n \n Returns:\n tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. 
Two outputs just to keep the API consistent with the normed version\n \"\"\"\n if prev_b_embedding is not None:\n if interpolate:\n prev_b_embedding = nn.functional.interpolate(\n prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)\n x = x + prev_b_embedding\n\n A = self._net(x)\n n, c, h, w = A.shape\n\n b_prev = nn.functional.interpolate(\n b_prev, (h, w), mode='bilinear', align_corners=True)\n b_centers = b_prev\n\n if self.attractor_type == 'exp':\n dist = exp_attractor\n else:\n dist = inv_attractor\n\n if not self.memory_efficient:\n func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]\n # .shape N, nbins, h, w\n delta_c = func(\n dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)\n else:\n delta_c = torch.zeros_like(b_centers, device=b_centers.device)\n for i in range(self.n_attractors):\n delta_c += dist(A[:, i, ...].unsqueeze(1) -\n b_centers) # .shape N, nbins, h, w\n\n if self.kind == 'mean':\n delta_c = delta_c / self.n_attractors\n\n b_new_centers = b_centers + delta_c\n B_centers = b_new_centers\n\n return b_new_centers, B_centers" }, { "identifier": "ConditionalLogBinomial", "path": "zoedepth/models/layers/dist_layers.py", "snippet": "class ConditionalLogBinomial(nn.Module):\n def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):\n \"\"\"Conditional Log Binomial distribution\n\n Args:\n in_features (int): number of input channels in main feature\n condition_dim (int): number of input channels in condition feature\n n_classes (int, optional): Number of classes. Defaults to 256.\n bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.\n p_eps (float, optional): small eps value. Defaults to 1e-4.\n max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.\n min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.\n \"\"\"\n super().__init__()\n self.p_eps = p_eps\n self.max_temp = max_temp\n self.min_temp = min_temp\n self.log_binomial_transform = LogBinomial(n_classes, act=act)\n bottleneck = (in_features + condition_dim) // bottleneck_factor\n self.mlp = nn.Sequential(\n nn.Conv2d(in_features + condition_dim, bottleneck,\n kernel_size=1, stride=1, padding=0),\n nn.GELU(),\n # 2 for p linear norm, 2 for t linear norm\n nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0),\n nn.Softplus()\n )\n\n def forward(self, x, cond):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor - NCHW): Main feature\n cond (torch.Tensor - NCHW): condition feature\n\n Returns:\n torch.Tensor: Output log binomial distribution\n \"\"\"\n pt = self.mlp(torch.concat((x, cond), dim=1))\n p, t = pt[:, :2, ...], pt[:, 2:, ...]\n\n p = p + self.p_eps\n p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...])\n\n t = t + self.p_eps\n t = t[:, 0, ...] / (t[:, 0, ...] 
+ t[:, 1, ...])\n t = t.unsqueeze(1)\n t = (self.max_temp - self.min_temp) * t + self.min_temp\n\n return self.log_binomial_transform(p, t)" }, { "identifier": "ConditionalLogBinomialV2", "path": "zoedepth/models/layers/dist_layers.py", "snippet": "class ConditionalLogBinomialV2(nn.Module):\n def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):\n \"\"\"Conditional Log Binomial distribution\n\n Args:\n in_features (int): number of input channels in main feature\n condition_dim (int): number of input channels in condition feature\n n_classes (int, optional): Number of classes. Defaults to 256.\n bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.\n p_eps (float, optional): small eps value. Defaults to 1e-4.\n max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.\n min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.\n \"\"\"\n super().__init__()\n self.p_eps = p_eps\n self.max_temp = max_temp\n self.min_temp = min_temp\n self.log_binomial_transform = LogBinomial(n_classes, act=act)\n bottleneck = (in_features + condition_dim) // bottleneck_factor\n self.mlp = nn.Sequential(\n nn.Conv2d(in_features + condition_dim, bottleneck,\n kernel_size=1, stride=1, padding=0),\n nn.GELU(),\n # 2 for p linear norm, 2 for t linear norm\n nn.Conv2d(bottleneck, n_classes*2, kernel_size=1, stride=1, padding=0),\n nn.Sigmoid()\n )\n self.n_classes = n_classes\n\n\n def forward(self, x, cond):\n \"\"\"Forward pass\n\n Args:\n x (torch.Tensor - NCHW): Main feature\n cond (torch.Tensor - NCHW): condition feature\n\n Returns:\n torch.Tensor: Output log binomial distribution\n \"\"\"\n pt = self.mlp(torch.concat((x, cond), dim=1))\n prob, shift = pt[:, :self.n_classes, ...], pt[:, self.n_classes:, ...]\n return prob, shift" }, { "identifier": "Projector", "path": "zoedepth/models/layers/localbins_layers.py", "snippet": "class Projector(nn.Module):\n def __init__(self, in_features, out_features, mlp_dim=128):\n \"\"\"Projector MLP\n\n Args:\n in_features (int): input channels\n out_features (int): output channels\n mlp_dim (int, optional): hidden dimension. Defaults to 128.\n \"\"\"\n super().__init__()\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, out_features, 1, 1, 0),\n )\n\n def forward(self, x):\n return self._net(x)" }, { "identifier": "SeedBinRegressor", "path": "zoedepth/models/layers/localbins_layers.py", "snippet": "class SeedBinRegressor(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Min depth value. Defaults to 1e-3.\n max_depth (float, optional): Max depth value. Defaults to 10.\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self.min_depth = min_depth\n self.max_depth = max_depth\n\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). 
One vector b for every pixel\n \"\"\"\n B = self._net(x)\n eps = 1e-3\n B = B + eps\n B_widths_normed = B / B.sum(dim=1, keepdim=True)\n B_widths = (self.max_depth - self.min_depth) * \\\n B_widths_normed # .shape NCHW\n # pad has the form (left, right, top, bottom, front, back)\n B_widths = nn.functional.pad(\n B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)\n B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW\n\n B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])\n return B_widths_normed, B_centers" }, { "identifier": "SeedBinRegressorUnnormed", "path": "zoedepth/models/layers/localbins_layers.py", "snippet": "class SeedBinRegressorUnnormed(nn.Module):\n def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):\n \"\"\"Bin center regressor network. Bin centers are unbounded\n\n Args:\n in_features (int): input channels\n n_bins (int, optional): Number of bin centers. Defaults to 16.\n mlp_dim (int, optional): Hidden dimension. Defaults to 256.\n min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)\n \"\"\"\n super().__init__()\n self.version = \"1_1\"\n self._net = nn.Sequential(\n nn.Conv2d(in_features, mlp_dim, 1, 1, 0),\n nn.ReLU(inplace=True),\n nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),\n nn.Softplus()\n )\n\n def forward(self, x):\n \"\"\"\n Returns tensor of bin_width vectors (centers). One vector b for every pixel\n \"\"\"\n B_centers = self._net(x)\n return B_centers, B_centers" }, { "identifier": "load_state_from_resource", "path": "zoedepth/models/model_io.py", "snippet": "def load_state_from_resource(model, resource: str):\n \"\"\"Loads weights to the model from a given resource. A resource can be of following types:\n 1. URL. Prefixed with \"url::\"\n e.g. url::http(s)://url.resource.com/ckpt.pt\n\n 2. Local path. Prefixed with \"local::\"\n e.g. 
local::/path/to/ckpt.pt\n\n\n Args:\n model (torch.nn.Module): Model\n resource (str): resource string\n\n Returns:\n torch.nn.Module: Model with loaded weights\n \"\"\"\n print(f\"Using pretrained resource {resource}\")\n\n if resource.startswith('url::'):\n url = resource.split('url::')[1]\n return load_state_dict_from_url(model, url, progress=True)\n\n elif resource.startswith('local::'):\n path = resource.split('local::')[1]\n return load_wts(model, path)\n \n else:\n raise ValueError(\"Invalid resource type, only url:: and local:: are supported\")" }, { "identifier": "generatemask", "path": "zoedepth/utils/misc.py", "snippet": "def generatemask(size, k_size=-1, sigma=-1, h_factor=0.03, w_factor=0.02):\n # Generates a Guassian mask\n mask = np.zeros(size, dtype=np.float32)\n if sigma == -1:\n sigma = int(size[0]/16)\n if k_size == -1:\n k_size = int(2 * np.ceil(2 * int(size[0]/16)) + 1)\n # mask[int(0.02*size[0]):size[0] - int(0.02*size[0]), int(0.015*size[1]): size[1] - int(0.015*size[1])] = 1\n mask[int(h_factor*size[0]):size[0] - int(h_factor*size[0]), int(w_factor*size[1]): size[1] - int(w_factor*size[1])] = 1\n mask = cv2.GaussianBlur(mask, (int(k_size), int(k_size)), sigma)\n mask = (mask - mask.min()) / (mask.max() - mask.min())\n mask = mask.astype(np.float32)\n return mask" }, { "identifier": "TransformerDecoderLayer", "path": "zoedepth/models/layers/transformer.py", "snippet": "class TransformerDecoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,\n activation=\"relu\", normalize_before=False):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.norm3 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n q = k = self.with_pos_embed(tgt, query_pos)\n tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,\n key_padding_mask=tgt_key_padding_mask)[0]\n tgt = tgt + self.dropout1(tgt2)\n tgt = self.norm1(tgt)\n tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),\n key=self.with_pos_embed(memory, pos),\n value=memory, attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt = self.norm2(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = tgt + self.dropout3(tgt2)\n tgt = self.norm3(tgt)\n return tgt\n\n def forward_pre(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = 
None):\n tgt2 = self.norm1(tgt)\n q = k = self.with_pos_embed(tgt2, query_pos)\n tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,\n key_padding_mask=tgt_key_padding_mask)[0]\n tgt = tgt + self.dropout1(tgt2)\n tgt2 = self.norm2(tgt)\n tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),\n key=self.with_pos_embed(memory, pos),\n value=memory, attn_mask=memory_mask,\n key_padding_mask=memory_key_padding_mask)[0]\n tgt = tgt + self.dropout2(tgt2)\n tgt2 = self.norm3(tgt)\n tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))\n tgt = tgt + self.dropout3(tgt2)\n return tgt\n\n def forward(self, tgt, memory,\n tgt_mask: Optional[Tensor] = None,\n memory_mask: Optional[Tensor] = None,\n tgt_key_padding_mask: Optional[Tensor] = None,\n memory_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n query_pos: Optional[Tensor] = None):\n if self.normalize_before:\n return self.forward_pre(tgt, memory, tgt_mask, memory_mask,\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)\n return self.forward_post(tgt, memory, tgt_mask, memory_mask,\n tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)" }, { "identifier": "TransformerEncoderLayer", "path": "zoedepth/models/layers/transformer.py", "snippet": "class TransformerEncoderLayer(nn.Module):\n\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.0,\n activation=\"gelu\", normalize_before=False):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(self,\n src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n q = k = self.with_pos_embed(src, pos)\n src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,\n key_padding_mask=src_key_padding_mask)[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def forward_pre(self, src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n src2 = self.norm1(src)\n q = k = self.with_pos_embed(src2, pos)\n src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,\n key_padding_mask=src_key_padding_mask)[0]\n src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src\n\n def forward(self, src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None):\n if self.normalize_before:\n return self.forward_pre(src, src_mask, src_key_padding_mask, pos)\n return self.forward_post(src, src_mask, src_key_padding_mask, pos)" }, { "identifier": "TransformerEncoder", "path": "zoedepth/models/layers/transformer.py", "snippet": "class TransformerEncoder(nn.Module):\n\n 
def __init__(self, encoder_layer, num_layers, norm=None, num_patches=None, ape=True, embed_dim=None, input_dim=None):\n super().__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n self.embed_dim = embed_dim\n if input_dim != embed_dim:\n self.proj_x = nn.Conv2d(input_dim, embed_dim, 3, padding=1)\n else:\n self.proj_x = None\n\n self.embed_proj = nn.Conv2d(1, embed_dim, 1, 1, 0) # learnable\n self.ape = ape\n if self.ape:\n self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim), requires_grad=True)\n trunc_normal_(self.absolute_pos_embed, std=.02)\n\n def forward(self, x,\n mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n area_prior = None):\n\n if self.proj_x is not None:\n x = self.proj_x(x)\n \n if area_prior is not None:\n prior_embed = self.embed_proj(area_prior)\n x = x + prior_embed\n\n Wh, Ww = x.size(2), x.size(3)\n x = x.flatten(2).transpose(1, 2)\n\n if self.ape:\n x = x + self.absolute_pos_embed # this line is later added\n\n for layer in self.layers:\n x = layer(x, src_mask=None, src_key_padding_mask=None, pos=None)\n\n if self.norm is not None:\n x = self.norm(x)\n\n x = x.view(-1, Wh, Ww, self.embed_dim).permute(0, 3, 1, 2).contiguous()\n return x" }, { "identifier": "colorize", "path": "zoedepth/utils/misc.py", "snippet": "def colorize(value, vmin=None, vmax=None, cmap='turbo_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):\n \"\"\"Converts a depth map to a color image.\n\n Args:\n value (torch.Tensor, numpy.ndarry): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed\n vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, value.min() is used. Defaults to None.\n vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, value.max() is used. Defaults to None.\n cmap (str, optional): matplotlib colormap to use. Defaults to 'magma_r'.\n invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.\n invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.\n background_color (tuple[int], optional): 4-tuple RGB color to give to invalid pixels. Defaults to (128, 128, 128, 255).\n gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.\n value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.\n\n Returns:\n numpy.ndarray, dtype - uint8: Colored depth map. 
Shape: (H, W, 4)\n \"\"\"\n if isinstance(value, torch.Tensor):\n value = value.detach().cpu().numpy()\n\n value = value.squeeze()\n if invalid_mask is None:\n invalid_mask = value == invalid_val\n mask = np.logical_not(invalid_mask)\n\n # normalize\n vmin = np.percentile(value[mask],2) if vmin is None else vmin\n vmax = np.percentile(value[mask],85) if vmax is None else vmax\n if vmin != vmax:\n value = (value - vmin) / (vmax - vmin) # vmin..vmax\n else:\n # Avoid 0-division\n value = value * 0.\n\n # squeeze last dim if it exists\n # grey out the invalid values\n\n value[invalid_mask] = np.nan\n cmapper = matplotlib.cm.get_cmap(cmap)\n if value_transform:\n value = value_transform(value)\n # value = value / value.max()\n value = cmapper(value, bytes=True) # (nxmx4)\n\n # img = value[:, :, :]\n img = value[...]\n img[invalid_mask] = background_color\n\n # return img.transpose((2, 0, 1))\n if gamma_corrected:\n # gamma correction\n img = img / 255\n img = np.power(img, 2.2)\n img = img * 255\n img = img.astype(np.uint8)\n return img" }, { "identifier": "colors", "path": "zoedepth/utils/misc.py", "snippet": "class colors:\n '''Colors class:\n Reset all colors with colors.reset\n Two subclasses fg for foreground and bg for background.\n Use as colors.subclass.colorname.\n i.e. colors.fg.red or colors.bg.green\n Also, the generic bold, disable, underline, reverse, strikethrough,\n and invisible work with the main class\n i.e. colors.bold\n '''\n reset = '\\033[0m'\n bold = '\\033[01m'\n disable = '\\033[02m'\n underline = '\\033[04m'\n reverse = '\\033[07m'\n strikethrough = '\\033[09m'\n invisible = '\\033[08m'\n\n class fg:\n black = '\\033[30m'\n red = '\\033[31m'\n green = '\\033[32m'\n orange = '\\033[33m'\n blue = '\\033[34m'\n purple = '\\033[35m'\n cyan = '\\033[36m'\n lightgrey = '\\033[37m'\n darkgrey = '\\033[90m'\n lightred = '\\033[91m'\n lightgreen = '\\033[92m'\n yellow = '\\033[93m'\n lightblue = '\\033[94m'\n pink = '\\033[95m'\n lightcyan = '\\033[96m'\n\n class bg:\n black = '\\033[40m'\n red = '\\033[41m'\n green = '\\033[42m'\n orange = '\\033[43m'\n blue = '\\033[44m'\n purple = '\\033[45m'\n cyan = '\\033[46m'\n lightgrey = '\\033[47m'" }, { "identifier": "UNetv1", "path": "zoedepth/models/layers/fusion_network.py", "snippet": "class UNetv1(nn.Module):\n def __init__(self, n_channels, g2l, pos_embed=False, use_area_prior=True):\n super(UNetv1, self).__init__()\n self.n_channels = n_channels\n\n self.inc = DoubleConv(n_channels, 32)\n self.down1 = Down(32, 256)\n self.down2 = Down(256, 256)\n self.down3 = Down(256, 256)\n self.down4 = Down(256, 256)\n self.down5 = Down(256, 256)\n\n self.up1 = Upv1(256+256+256, 256, 384)\n self.up2 = Upv1(256+256+256, 256, 384)\n self.up3 = Upv1(256+256+256, 256, 384)\n self.up4 = Upv1(256+256+256, 256, 384)\n self.up5 = Upv1(256+32+256, 32, 272)\n\n self.g2l = g2l\n \n if self.g2l:\n self.g2l_att = nn.ModuleList()\n win = 12\n in_channels = [32, 256, 256, 256, 256, 256]\n crf_dims = [32, 256, 256, 256, 256, 256]\n\n self.g2l5 = G2LFusion(input_dim=in_channels[5], embed_dim=crf_dims[5], window_size=win, num_heads=32, depth=4, num_patches=12*16)\n self.g2l4 = G2LFusion(input_dim=in_channels[4], embed_dim=crf_dims[4], window_size=win, num_heads=32, depth=4, num_patches=24*32)\n self.g2l3 = G2LFusion(input_dim=in_channels[3], embed_dim=crf_dims[3], window_size=win, num_heads=16, depth=3, num_patches=48*64)\n self.g2l2 = G2LFusion(input_dim=in_channels[2], embed_dim=crf_dims[2], window_size=win, num_heads=16, depth=3, num_patches=96*128)\n 
self.g2l1 = G2LFusion(input_dim=in_channels[1], embed_dim=crf_dims[1], window_size=win, num_heads=8, depth=2, num_patches=192*256)\n self.g2l0 = G2LFusion(input_dim=in_channels[0], embed_dim=crf_dims[0], window_size=win, num_heads=8, depth=2, num_patches=384*512) \n\n self.conv5 = DoubleConvWOBN(in_channels[4] * 2, in_channels[4], in_channels[4])\n self.conv4 = DoubleConvWOBN(in_channels[4] * 2, in_channels[4], in_channels[4])\n self.conv3 = DoubleConvWOBN(in_channels[3] * 2, in_channels[3], in_channels[3])\n self.conv2 = DoubleConvWOBN(in_channels[2] * 2, in_channels[2], in_channels[2])\n self.conv1 = DoubleConvWOBN(in_channels[1] * 2, in_channels[1], in_channels[1])\n self.conv0 = DoubleConvWOBN(in_channels[0] * 2, in_channels[0], in_channels[0])\n \n def forward(self, \n input_tensor, \n guide_plus, \n guide_cat, \n crop_area_resize=None, \n bbox=None, \n fine_feat_crop=None, \n coarse_feat_whole=None, \n coarse_feat_whole_hack=None, \n coarse_feat_crop=None):\n\n # apply unscaled feat to swin\n if coarse_feat_whole_hack is not None:\n coarse_feat_whole = coarse_feat_whole_hack\n\n if crop_area_resize is None:\n not_use_prior = True\n else:\n not_use_prior = False\n \n x1 = self.inc(input_tensor)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3) \n x5 = self.down4(x4)\n x6 = self.down5(x5)\n if self.g2l:\n g2l_feat5 = self.g2l5(coarse_feat_whole[0], crop_area_resize[0])\n g2l_feat5 = torch_roi_align(g2l_feat5, bbox, (12, 16), 12/384, aligned=True)\n x6 = self.conv5(torch.cat([x6, g2l_feat5], dim=1))\n \n x5 = self.up1(torch.cat([x6, guide_cat[0]], dim=1), x5)\n if self.g2l:\n g2l_feat4 = self.g2l4(coarse_feat_whole[1], crop_area_resize[1])\n g2l_feat4 = torch_roi_align(g2l_feat4, bbox, (24, 32), 24/384, aligned=True)\n x5 = self.conv4(torch.cat([x5, g2l_feat4], dim=1)) \n\n x4 = self.up2(torch.cat([x5, guide_cat[1]], dim=1), x4)\n if self.g2l:\n g2l_feat3 = self.g2l3(coarse_feat_whole[2], crop_area_resize[2])\n g2l_feat3 = torch_roi_align(g2l_feat3, bbox, (48, 64), 48/384, aligned=True)\n x4 = self.conv3(torch.cat([x4, g2l_feat3], dim=1))\n\n x3 = self.up3(torch.cat([x4, guide_cat[2]], dim=1), x3)\n if self.g2l:\n g2l_feat2 = self.g2l2(coarse_feat_whole[3], crop_area_resize[3])\n g2l_feat2 = torch_roi_align(g2l_feat2, bbox, (96, 128), 96/384, aligned=True)\n x3 = self.conv2(torch.cat([x3, g2l_feat2], dim=1))\n\n x2 = self.up4(torch.cat([x3, guide_cat[3]], dim=1), x2)\n if self.g2l:\n g2l_feat1 = self.g2l1(coarse_feat_whole[4], crop_area_resize[4])\n g2l_feat1 = torch_roi_align(g2l_feat1, bbox, (192, 256), 192/384, aligned=True)\n x2 = self.conv1(torch.cat([x2, g2l_feat1], dim=1))\n\n x1 = self.up5(torch.cat([x2, guide_cat[4]], dim=1), x1)\n if self.g2l:\n g2l_feat0 = self.g2l0(coarse_feat_whole[5], crop_area_resize[5])\n g2l_feat0 = torch_roi_align(g2l_feat0, bbox, (384, 512), 384/384, aligned=True)\n x1 = self.conv0(torch.cat([x1, g2l_feat0], dim=1))\n\n output = [x1, x2, x3, x4, x5, x6]\n return output" } ]
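The attractor snippets above nudge each bin center by an aggregated pull term dist(A - b_centers) computed against every attractor position. The exp_attractor / inv_attractor functions referenced there are not included in this record, so the sketch below is a minimal stand-alone illustration of the 'sum' update rule with an assumed exponential kernel (dx damped by exp(-alpha * |dx|**gamma)); it is not the repository's exact implementation.

import torch

def exp_attractor(dx, alpha=300, gamma=2):
    # Assumed kernel: pull proportional to dx, damped exponentially with distance.
    return torch.exp(-alpha * dx.abs() ** gamma) * dx

n, n_attractors, n_bins, h, w = 2, 16, 64, 12, 16
A = torch.rand(n, n_attractors, h, w)      # per-pixel attractor positions
b_centers = torch.rand(n, n_bins, h, w)    # per-pixel previous bin centers

# Sum the pull of every attractor on every bin center ('sum' aggregation, non memory-efficient path).
delta_c = exp_attractor(A.unsqueeze(2) - b_centers.unsqueeze(1)).sum(dim=1)
b_new_centers = b_centers + delta_c        # .shape n, n_bins, h, w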
import itertools import math import copy import torch import torch.nn as nn import numpy as np import matplotlib.pyplot as plt import matplotlib.pyplot as plt import os import torch.distributed as dist import torch.nn.functional as F from zoedepth.models.depth_model import DepthModel from zoedepth.models.base_models.midas import MidasCore from zoedepth.models.layers.attractor import AttractorLayer, AttractorLayerUnnormed from zoedepth.models.layers.dist_layers import ConditionalLogBinomial, ConditionalLogBinomialV2 from zoedepth.models.layers.localbins_layers import (Projector, SeedBinRegressor, SeedBinRegressorUnnormed) from zoedepth.models.model_io import load_state_from_resource from torchvision.transforms import Normalize from torchvision.ops import roi_align as torch_roi_align from zoedepth.utils.misc import generatemask from zoedepth.models.layers.transformer import TransformerDecoderLayer, TransformerEncoderLayer, TransformerEncoder from zoedepth.utils.misc import colorize, colors from zoedepth.models.layers.fusion_network import UNetv1 from zoedepth.models.zoedepth_custom.zoedepth_custom import ZoeDepthCustom
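The import block pulls in torchvision's roi_align (aliased as torch_roi_align), which the UNetv1 snippet uses at every decoder scale to crop global features back to the current patch's bounding box. A minimal sketch of that call pattern follows; the feature size, box coordinates, and the 384x512 full-resolution frame are illustrative values only, and spatial_scale (e.g. 12/384) maps full-resolution box coordinates onto the current feature resolution.

import torch
from torchvision.ops import roi_align as torch_roi_align

feat = torch.rand(1, 256, 12, 16)   # global feature map at 12x16
# One box per row: (batch_index, x1, y1, x2, y2) given in 384x512 full-resolution coordinates.
bbox = torch.tensor([[0.0, 128.0, 96.0, 384.0, 288.0]])
crop = torch_roi_align(feat, bbox, output_size=(12, 16), spatial_scale=12 / 384, aligned=True)
print(crop.shape)  # torch.Size([1, 256, 12, 16])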
14,798
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li def check_keywords_in_name(name, keywords=()): isin = False for keyword in keywords: if keyword in name: isin = True return isin def get_activation(name, bank): # input of forward_hook will be a function of model/inp/oup def hook(module, input, output): bank[name] = output return hook def get_input(name, bank): # input of forward_hook will be a function of model/inp/oup def hook(module, input, output): bank[name] = input return hook class AttributeDict(dict): def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): try: del self[key] except KeyError: raise AttributeError(key) class PatchFusion(DepthModel): def __init__(self, coarse_model, fine_model, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True, midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, sr_ratio=1, raw_depth_shape=(2160, 3840), transform_sample_gt_size=(2160, 3840), representation='', fetch_features=True, sample_feat_level=3, use_hr=False, deform=False, wo_bins=False, baseline=False, condition=True, freeze=False, g2l=False, use_fusion_network=False, use_area_prior=False, unet_version='v1', consistency_training=False, consistency_target='unet_feat', pos_embed=False, **kwargs): """ZoeDepth model. This is the version of ZoeDepth that has a single metric head Args: core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features n_bins (int, optional): Number of bin centers. Defaults to 64. bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3. max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10. n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. 
encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. sr_ratio: sr ratio during infer raw_depth_shape: raw depth shape during infer. times sr_ratio will be the target resolution. Used to sample points during training transform_sample_gt_size: training depth shape # influenced by crop shape which is not included in this pipeline right now representation: I use it to test the "bilap head" and a discarded idea fetch_features: if fetch feats. Default=True """ super().__init__() self.coarse_model = coarse_model self.fine_model = fine_model self.max_depth = max_depth self.min_depth = min_depth self.min_temp = min_temp self.bin_centers_type = bin_centers_type self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.train_midas = train_midas self.inverse_midas = inverse_midas if bin_centers_type == "normed": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == "softplus": # default SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid1": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid2": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError( "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") N_MIDAS_OUT = 32 btlnck_features = self.fine_model.core.output_channels[0] num_out_features = self.fine_model.core.output_channels[1:] # all of them are the same self.seed_bin_regressor = SeedBinRegressorLayer( btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
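The cropped code stops right where the metric-bins head is being assembled: a seed bin regressor on the bottleneck features, with the recorded next_line adding the matching Projector for bin embeddings. The toy forward pass below only shows how the classes from the context list chain together; it assumes this repository's zoedepth package is importable, and the channel sizes, feature maps, and the extra decoder_projector are arbitrary illustrations rather than the model's actual configuration.

import torch
from zoedepth.models.layers.localbins_layers import SeedBinRegressorUnnormed, Projector
from zoedepth.models.layers.attractor import AttractorLayerUnnormed

btlnck_features, bin_embedding_dim, n_bins = 256, 128, 64
x_btlnck = torch.rand(2, btlnck_features, 12, 16)   # bottleneck feature map
x_decoder = torch.rand(2, 256, 24, 32)              # a later decoder feature map

seed_bin_regressor = SeedBinRegressorUnnormed(btlnck_features, n_bins=n_bins)
seed_projector = Projector(btlnck_features, bin_embedding_dim)      # the recorded next_line
decoder_projector = Projector(256, bin_embedding_dim)               # hypothetical per-stage projector
attractor = AttractorLayerUnnormed(bin_embedding_dim, n_bins, n_attractors=16)

_, seed_b_centers = seed_bin_regressor(x_btlnck)                    # (2, 64, 12, 16)
prev_b_embedding = seed_projector(x_btlnck)                         # (2, 128, 12, 16)
b_embedding = decoder_projector(x_decoder)                          # (2, 128, 24, 32)
b, b_centers = attractor(b_embedding, seed_b_centers, prev_b_embedding=prev_b_embedding, interpolate=True)
print(b_centers.shape)  # torch.Size([2, 64, 24, 32])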
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li def check_keywords_in_name(name, keywords=()): isin = False for keyword in keywords: if keyword in name: isin = True return isin def get_activation(name, bank): # input of forward_hook will be a function of model/inp/oup def hook(module, input, output): bank[name] = output return hook def get_input(name, bank): # input of forward_hook will be a function of model/inp/oup def hook(module, input, output): bank[name] = input return hook class AttributeDict(dict): def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): try: del self[key] except KeyError: raise AttributeError(key) class PatchFusion(DepthModel): def __init__(self, coarse_model, fine_model, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True, midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, sr_ratio=1, raw_depth_shape=(2160, 3840), transform_sample_gt_size=(2160, 3840), representation='', fetch_features=True, sample_feat_level=3, use_hr=False, deform=False, wo_bins=False, baseline=False, condition=True, freeze=False, g2l=False, use_fusion_network=False, use_area_prior=False, unet_version='v1', consistency_training=False, consistency_target='unet_feat', pos_embed=False, **kwargs): """ZoeDepth model. This is the version of ZoeDepth that has a single metric head Args: core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features n_bins (int, optional): Number of bin centers. Defaults to 64. bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus". bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3. max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10. n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. 
attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. sr_ratio: sr ratio during infer raw_depth_shape: raw depth shape during infer. times sr_ratio will be the target resolution. Used to sample points during training transform_sample_gt_size: training depth shape # influenced by crop shape which is not included in this pipeline right now representation: I use it to test the "bilap head" and a discarded idea fetch_features: if fetch feats. Default=True """ super().__init__() self.coarse_model = coarse_model self.fine_model = fine_model self.max_depth = max_depth self.min_depth = min_depth self.min_temp = min_temp self.bin_centers_type = bin_centers_type self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.train_midas = train_midas self.inverse_midas = inverse_midas if bin_centers_type == "normed": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == "softplus": # default SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid1": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid2": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError( "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") N_MIDAS_OUT = 32 btlnck_features = self.fine_model.core.output_channels[0] num_out_features = self.fine_model.core.output_channels[1:] # all of them are the same self.seed_bin_regressor = SeedBinRegressorLayer( btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
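Among the context snippets, ConditionalLogBinomial maps a main feature and a conditioning feature to a per-pixel probability/temperature pair and hands them to a LogBinomial transform (defined elsewhere in dist_layers.py, not shown in this record). A minimal call with random tensors, again assuming the repository's zoedepth package is importable and using arbitrary channel counts, could look like:

import torch
from zoedepth.models.layers.dist_layers import ConditionalLogBinomial

clb = ConditionalLogBinomial(in_features=32, condition_dim=128, n_classes=64)
x = torch.rand(2, 32, 48, 64)      # main feature (e.g. last decoder output)
cond = torch.rand(2, 128, 48, 64)  # conditioning feature (e.g. bin embedding)
probs = clb(x, cond)               # per-pixel distribution over the 64 bin classes
print(probs.shape)                 # expected: torch.Size([2, 64, 48, 64])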
self.seed_projector = Projector(btlnck_features, bin_embedding_dim)
6
2023-12-04 08:43:15+00:00
24k
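Read together, the scalar fields above suggest how a record is consumed: cropped_code is the in-file prefix to complete, next_line is the ground-truth continuation, gold_snippet_index presumably indexes the context snippet that continuation depends on, and token_num / level look like a prompt-length measure and its bucket. The sketch below shows one plausible exact-match evaluation over such a record; the prompt assembly and generate_fn are placeholders, not a documented protocol of this dataset.

def evaluate_record(record, generate_fn):
    """Score one next-line completion record by whitespace-normalized exact match."""
    # Hypothetical prompt assembly: cross-file context snippets, then the in-file prefix.
    context = "\n\n".join(item["snippet"] for item in record["context"])
    prompt = context + "\n\n" + record["cropped_code"]
    prediction = generate_fn(prompt)                       # model proposes one line
    target = record["next_line"]
    gold = record["context"][record["gold_snippet_index"]]["identifier"]
    return {"exact_match": prediction.strip() == target.strip(), "gold_identifier": gold}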
baaivision/GeoDream
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = 
self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" }, { "identifier": "GeodreamGeometryVolume", "path": "threestudio/models/geometry/geodream_geometry_volume.py", "snippet": "class GeodreamGeometryVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n init_volume_path: str = \"con_volume_lod0.pth\"\n one2345_weight: str = \"pretrain.pth\"\n sdf_network_grad: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n \n \n self.sdf_layers = SdfLayer()\n self.deviation_network = SingleVarianceNetwork(self.cfg.one2345_weight)\n\n # sdf_layers weight\n sdf_layers_weight = torch.load(self.cfg.one2345_weight)['sdf_network_lod0']\n selected_state_dict = {}\n prefix = 'sdf_layer'\n for key, value in sdf_layers_weight.items():\n if key.startswith(prefix):\n selected_state_dict[key[10:]] = value# key need remove sdf_layer prefix\n self.sdf_layers.load_state_dict(selected_state_dict)\n print(\"sdf_layers is loading weight at \" + self.cfg.one2345_weight)\n \n # sdf_layers freeze \n if self.cfg.sdf_network_grad:\n print(\"sdf_layers network is training\")\n else:\n for p in self.sdf_layers.parameters():\n p.requires_grad_(False)\n print(\"sdf_layers network is freezeing\")\n\n # volume weight\n volume_weight = torch.load(self.cfg.init_volume_path)\n\n self.volume = nn.Parameter(volume_weight, requires_grad=True)\n print(\"volume network is loading weight at \" + self.cfg.init_volume_path)\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n 
* (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], viewdirs, dists, output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n \n sdf, feature_vector = self.sdf(points.view(-1, self.cfg.n_input_dims))\n\n output = {\n \"density\": sdf,\n }\n \n g = self.gradient(points.view(-1, self.cfg.n_input_dims))\n alphas = self.get_alpha(points.view(-1, self.cfg.n_input_dims), viewdirs, dists, feature_vector, sdf, g)\n output.update({\"ALPHA\": alphas})\n\n \n points_norm = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n \n enc = self.encoding(points_norm.view(-1, self.cfg.n_input_dims))\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n \n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n density, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))\n density = density.reshape(*points.shape[:-1], 1)\n return density\n \n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n sdf, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))\n sdf = sdf.reshape(*points.shape[:-1], 1)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n \n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"GeodreamGeometryVolume\":\n if isinstance(other, GeodreamGeometryVolume):\n instance = GeodreamGeometryVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n 
other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {GeodreamGeometryVolume.__name__} from {other.__class__.__name__}\"\n )\n \n def forward_sdf(self, pts):\n sdf, _ = self.sdf(pts)\n return sdf\n \n def sdf(self, pts, lod=0):\n conditional_volume = self.volume\n num_pts = pts.shape[0]\n device = pts.device\n pts_ = pts.clone()\n pts = pts.view(1, 1, 1, num_pts, 3) # - should be in range (-1, 1)\n\n pts = torch.flip(pts, dims=[-1])\n sampled_feature = grid_sample_3d(conditional_volume, pts) # [1, c, 1, 1, num_pts]\n sampled_feature = sampled_feature.view(-1, num_pts).permute(1, 0).contiguous().to(device)\n\n sdf_pts = self.sdf_layers(pts_, sampled_feature)\n\n return sdf_pts[:, :1], sdf_pts[:, 1:]\n \n def get_alpha(self, ray_samples, rays_d, dists, feature_vector, sdf=None, gradients=None):\n \"\"\"compute alpha from sdf as in NeuS\"\"\"\n inv_variance = self.deviation_network(feature_vector)[:, :1].clip(1e-6, 1e6) # Single parameter\n\n \n #gradients = torch.ones_like(rays_d, requires_grad=False, device=rays_d.device)\n true_dot_val = (rays_d * gradients).sum(-1, keepdim=True) # * calculate\n alpha_inter_ratio = 0.0 \n iter_cos = -(F.relu(-true_dot_val * 0.5 + 0.5) * (1.0 - alpha_inter_ratio) + F.relu(\n -true_dot_val) * alpha_inter_ratio) # always non-positive\n\n true_estimate_sdf_half_next = sdf + iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5\n true_estimate_sdf_half_prev = sdf - iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5\n\n prev_cdf = torch.sigmoid(true_estimate_sdf_half_prev * inv_variance)\n next_cdf = torch.sigmoid(true_estimate_sdf_half_next * inv_variance)\n\n p = prev_cdf - next_cdf\n c = prev_cdf\n\n alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0)\n\n return alpha\n \n def gradient(self, x):\n \n x.requires_grad_(True)\n with torch.enable_grad():\n sdf, _ = self.sdf(x)\n y = sdf\n\n d_output = torch.ones_like(y, requires_grad=False, device=y.device)\n # ! Distributed Data Parallel doesn’t work with torch.autograd.grad()\n # ! (i.e. it will only work if gradients are to be accumulated in .grad attributes of parameters).\n gradients = torch.autograd.grad(\n outputs=y,\n inputs=x,\n grad_outputs=d_output,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return gradients" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.models.geometry.geodream_geometry_volume import GeodreamGeometryVolume from threestudio.utils.typing import * from pysdf import SDF
18340
mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: 
instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not 
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF) or isinstance(other, GeodreamGeometryVolume):
11
2023-12-01 01:59:42+00:00
24k
horseee/DeepCache
DeepCache/sd/pipeline_text_to_video_zero.py
[ { "identifier": "UNet2DConditionModel", "path": "DeepCache/sd/unet_2d_condition.py", "snippet": "class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n r\"\"\"\n A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample\n shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented\n for all models (such as downloading or saving).\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.\n center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.\n flip_sin_to_cos (`bool`, *optional*, defaults to `False`):\n Whether to flip the sin to cos in the time embedding.\n freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n mid_block_type (`str`, *optional*, defaults to `\"UNetMidBlock2DCrossAttn\"`):\n Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or\n `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\")`):\n The tuple of upsample blocks to use.\n only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):\n Whether to include self-attention in the basic transformer blocks, see\n [`~models.attention.BasicTransformerBlock`].\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, normalization and activation layers is skipped in post-processing.\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):\n The dimension of the cross attention features.\n transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):\n The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. 
Only relevant for\n [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],\n [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].\n encoder_hid_dim (`int`, *optional*, defaults to None):\n If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`\n dimension to `cross_attention_dim`.\n encoder_hid_dim_type (`str`, *optional*, defaults to `None`):\n If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text\n embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n num_attention_heads (`int`, *optional*):\n The number of attention heads. If not defined, defaults to `attention_head_dim`\n resnet_time_scale_shift (`str`, *optional*, defaults to `\"default\"`): Time scale shift config\n for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.\n class_embed_type (`str`, *optional*, defaults to `None`):\n The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,\n `\"timestep\"`, `\"identity\"`, `\"projection\"`, or `\"simple_projection\"`.\n addition_embed_type (`str`, *optional*, defaults to `None`):\n Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or\n \"text\". \"text\" will use the `TextTimeEmbedding` layer.\n addition_time_embed_dim: (`int`, *optional*, defaults to `None`):\n Dimension for the timestep embeddings.\n num_class_embeds (`int`, *optional*, defaults to `None`):\n Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing\n class conditioning with `class_embed_type` equal to `None`.\n time_embedding_type (`str`, *optional*, defaults to `positional`):\n The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.\n time_embedding_dim (`int`, *optional*, defaults to `None`):\n An optional override for the dimension of the projected time embedding.\n time_embedding_act_fn (`str`, *optional*, defaults to `None`):\n Optional activation function to use only once on the time embeddings before they are passed to the rest of\n the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.\n timestep_post_act (`str`, *optional*, defaults to `None`):\n The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.\n time_cond_proj_dim (`int`, *optional*, defaults to `None`):\n The dimension of `cond_proj` layer in the timestep embedding.\n conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.\n conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.\n projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when\n `class_embed_type=\"projection\"`. Required when `class_embed_type=\"projection\"`.\n class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time\n embeddings with the class embeddings.\n mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):\n Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. 
If\n `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the\n `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False`\n otherwise.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n mid_block_type: Optional[str] = \"UNetMidBlock2DCrossAttn\",\n up_block_types: Tuple[str] = (\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\"),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: Union[int, Tuple[int]] = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n dropout: float = 0.0,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: Union[int, Tuple[int]] = 1280,\n transformer_layers_per_block: Union[int, Tuple[int]] = 1,\n encoder_hid_dim: Optional[int] = None,\n encoder_hid_dim_type: Optional[str] = None,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n num_attention_heads: Optional[Union[int, Tuple[int]]] = None,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n addition_embed_type: Optional[str] = None,\n addition_time_embed_dim: Optional[int] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n resnet_skip_time_act: bool = False,\n resnet_out_scale_factor: int = 1.0,\n time_embedding_type: str = \"positional\",\n time_embedding_dim: Optional[int] = None,\n time_embedding_act_fn: Optional[str] = None,\n timestep_post_act: Optional[str] = None,\n time_cond_proj_dim: Optional[int] = None,\n conv_in_kernel: int = 3,\n conv_out_kernel: int = 3,\n projection_class_embeddings_input_dim: Optional[int] = None,\n attention_type: str = \"default\",\n class_embeddings_concat: bool = False,\n mid_block_only_cross_attention: Optional[bool] = None,\n cross_attention_norm: Optional[str] = None,\n addition_embed_type_num_heads=64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n\n if num_attention_heads is not None:\n raise ValueError(\n \"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.\"\n )\n\n # If `num_attention_heads` is not defined (which is the case for most models)\n # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.\n # The reason for this behavior is to correct for incorrectly named variables that were introduced\n # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131\n # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking\n # which is why we correct for the naming here.\n num_attention_heads = num_attention_heads or attention_head_dim\n\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n if time_embedding_type == \"fourier\":\n time_embed_dim = time_embedding_dim or block_out_channels[0] * 2\n if time_embed_dim % 2 != 0:\n raise ValueError(f\"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.\")\n self.time_proj = GaussianFourierProjection(\n time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos\n )\n timestep_input_dim = time_embed_dim\n elif time_embedding_type == \"positional\":\n time_embed_dim = time_embedding_dim or block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n else:\n raise ValueError(\n f\"{time_embedding_type} does not exist. 
Please make sure to use one of `fourier` or `positional`.\"\n )\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n post_act_fn=timestep_post_act,\n cond_proj_dim=time_cond_proj_dim,\n )\n\n if encoder_hid_dim_type is None and encoder_hid_dim is not None:\n encoder_hid_dim_type = \"text_proj\"\n self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)\n logger.info(\"encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.\")\n\n if encoder_hid_dim is None and encoder_hid_dim_type is not None:\n raise ValueError(\n f\"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}.\"\n )\n\n if encoder_hid_dim_type == \"text_proj\":\n self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)\n elif encoder_hid_dim_type == \"text_image_proj\":\n # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much\n # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use\n # case when `addition_embed_type == \"text_image_proj\"` (Kadinsky 2.1)`\n self.encoder_hid_proj = TextImageProjection(\n text_embed_dim=encoder_hid_dim,\n image_embed_dim=cross_attention_dim,\n cross_attention_dim=cross_attention_dim,\n )\n elif encoder_hid_dim_type == \"image_proj\":\n # Kandinsky 2.2\n self.encoder_hid_proj = ImageProjection(\n image_embed_dim=encoder_hid_dim,\n cross_attention_dim=cross_attention_dim,\n )\n elif encoder_hid_dim_type is not None:\n raise ValueError(\n f\"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'.\"\n )\n else:\n self.encoder_hid_proj = None\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. 
it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n elif class_embed_type == \"simple_projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n if addition_embed_type == \"text\":\n if encoder_hid_dim is not None:\n text_time_embedding_from_dim = encoder_hid_dim\n else:\n text_time_embedding_from_dim = cross_attention_dim\n\n self.add_embedding = TextTimeEmbedding(\n text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads\n )\n elif addition_embed_type == \"text_image\":\n # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much\n # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use\n # case when `addition_embed_type == \"text_image\"` (Kadinsky 2.1)`\n self.add_embedding = TextImageTimeEmbedding(\n text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim\n )\n elif addition_embed_type == \"text_time\":\n self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)\n self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n elif addition_embed_type == \"image\":\n # Kandinsky 2.2\n self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)\n elif addition_embed_type == \"image_hint\":\n # Kandinsky 2.2 ControlNet\n self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)\n elif addition_embed_type is not None:\n raise ValueError(f\"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.\")\n\n if time_embedding_act_fn is None:\n self.time_embed_act = None\n else:\n self.time_embed_act = get_activation(time_embedding_act_fn)\n\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n if mid_block_only_cross_attention is None:\n mid_block_only_cross_attention = only_cross_attention\n\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if mid_block_only_cross_attention is None:\n mid_block_only_cross_attention = False\n\n if isinstance(num_attention_heads, int):\n num_attention_heads = (num_attention_heads,) * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n if isinstance(cross_attention_dim, int):\n cross_attention_dim = (cross_attention_dim,) * len(down_block_types)\n\n if isinstance(layers_per_block, int):\n layers_per_block = [layers_per_block] * len(down_block_types)\n\n if isinstance(transformer_layers_per_block, int):\n transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)\n\n if class_embeddings_concat:\n # The time embeddings are concatenated with the class embeddings. 
The dimension of the\n # time embeddings passed to the down, middle, and up blocks is twice the dimension of the\n # regular time embeddings\n blocks_time_embed_dim = time_embed_dim * 2\n else:\n blocks_time_embed_dim = time_embed_dim\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block[i],\n transformer_layers_per_block=transformer_layers_per_block[i],\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=blocks_time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim[i],\n num_attention_heads=num_attention_heads[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n resnet_skip_time_act=resnet_skip_time_act,\n resnet_out_scale_factor=resnet_out_scale_factor,\n cross_attention_norm=cross_attention_norm,\n attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,\n dropout=dropout,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock2DCrossAttn\":\n self.mid_block = UNetMidBlock2DCrossAttn(\n transformer_layers_per_block=transformer_layers_per_block[-1],\n in_channels=block_out_channels[-1],\n temb_channels=blocks_time_embed_dim,\n dropout=dropout,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim[-1],\n num_attention_heads=num_attention_heads[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n attention_type=attention_type,\n )\n elif mid_block_type == \"UNetMidBlock2DSimpleCrossAttn\":\n self.mid_block = UNetMidBlock2DSimpleCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=blocks_time_embed_dim,\n dropout=dropout,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim[-1],\n attention_head_dim=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n only_cross_attention=mid_block_only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif mid_block_type is None:\n self.mid_block = None\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_num_attention_heads = list(reversed(num_attention_heads))\n reversed_layers_per_block = list(reversed(layers_per_block))\n reversed_cross_attention_dim = list(reversed(cross_attention_dim))\n reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block))\n only_cross_attention = list(reversed(only_cross_attention))\n\n output_channel = reversed_block_out_channels[0]\n for i, 
up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=reversed_layers_per_block[i] + 1,\n transformer_layers_per_block=reversed_transformer_layers_per_block[i],\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=blocks_time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=reversed_cross_attention_dim[i],\n num_attention_heads=reversed_num_attention_heads[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n resnet_skip_time_act=resnet_skip_time_act,\n resnet_out_scale_factor=resnet_out_scale_factor,\n cross_attention_norm=cross_attention_norm,\n attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,\n dropout=dropout,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n\n self.conv_act = get_activation(act_fn)\n\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n if attention_type in [\"gated\", \"gated-text-image\"]:\n positive_len = 768\n if isinstance(cross_attention_dim, int):\n positive_len = cross_attention_dim\n elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):\n positive_len = cross_attention_dim[0]\n\n feature_type = \"text-only\" if attention_type == \"gated\" else \"text-image\"\n self.position_net = PositionNet(\n positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type\n )\n\n @property\n def attn_processors(self) -> Dict[str, AttentionProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n if hasattr(module, \"get_processor\"):\n processors[f\"{name}.processor\"] = module.get_processor(return_deprecated_lora=True)\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n def set_attn_processor(\n self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False\n ):\n r\"\"\"\n Sets the attention processor to use to compute attention.\n\n Parameters:\n processor (`dict` of `AttentionProcessor` or 
only `AttentionProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n for **all** `Attention` layers.\n\n If `processor` is a dict, the key needs to define the path to the corresponding cross attention\n processor. This is strongly recommended when setting trainable attention processors.\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n module.set_processor(processor, _remove_lora=_remove_lora)\n else:\n module.set_processor(processor.pop(f\"{name}.processor\"), _remove_lora=_remove_lora)\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n def set_default_attn_processor(self):\n \"\"\"\n Disables custom attention processors and sets the default attention implementation.\n \"\"\"\n if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):\n processor = AttnAddedKVProcessor()\n elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):\n processor = AttnProcessor()\n else:\n raise ValueError(\n f\"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}\"\n )\n\n self.set_attn_processor(processor, _remove_lora=True)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module splits the input tensor in slices to compute attention in\n several steps. This is useful for saving some memory in exchange for a small decrease in speed.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, input to the attention heads is halved, so attention is computed in two steps. If\n `\"max\"`, maximum amount of memory is saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if hasattr(module, \"gradient_checkpointing\"):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n quick_replicate: bool = False,\n replicate_prv_feature: Optional[List[torch.Tensor]] = None,\n cache_layer_id: Optional[int] = None,\n cache_block_id: Optional[int] = None,\n return_dict: bool = True,\n ) -> Union[UNet2DConditionOutput, Tuple]:\n r\"\"\"\n The [`UNet2DConditionModel`] forward method.\n\n Args:\n sample (`torch.FloatTensor`):\n The noisy input tensor with the following shape `(batch, channel, height, width)`.\n timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.\n encoder_hidden_states (`torch.FloatTensor`):\n The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.\n encoder_attention_mask (`torch.Tensor`):\n A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. 
If\n `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,\n which adds large negative values to the attention scores corresponding to \"discard\" tokens.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].\n added_cond_kwargs: (`dict`, *optional*):\n A kwargs dictionary containin additional embeddings that if specified are added to the embeddings that\n are passed along to the UNet blocks.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise\n a `tuple` is returned where the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # ensure attention_mask is a bias, and give it a singleton query_tokens dimension\n # expects mask of shape:\n # [batch, key_tokens]\n # adds singleton query_tokens dimension:\n # [batch, 1, key_tokens]\n # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:\n # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)\n # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)\n if attention_mask is not None:\n # assume that mask is expressed as:\n # (1 = keep, 0 = discard)\n # convert mask into a bias that can be added to attention scores:\n # (keep = +0, discard = -10000.0)\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # convert encoder_attention_mask to a bias the same way we do for attention_mask\n if encoder_attention_mask is not None:\n encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0\n encoder_attention_mask = encoder_attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # `Timesteps` does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=sample.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n aug_emb = None\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n # `Timesteps` does not contain any weights and will always return f32 tensors\n # there might be better ways to encapsulate this.\n class_labels = class_labels.to(dtype=sample.dtype)\n\n class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)\n\n if self.config.class_embeddings_concat:\n emb = torch.cat([emb, class_emb], dim=-1)\n else:\n emb = emb + class_emb\n\n if self.config.addition_embed_type == \"text\":\n aug_emb = self.add_embedding(encoder_hidden_states)\n elif self.config.addition_embed_type == \"text_image\":\n # Kandinsky 2.1 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`\"\n )\n\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n text_embs = added_cond_kwargs.get(\"text_embeds\", encoder_hidden_states)\n aug_emb = self.add_embedding(text_embs, image_embs)\n elif self.config.addition_embed_type == \"text_time\":\n # SDXL - style\n if \"text_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`\"\n )\n text_embeds = added_cond_kwargs.get(\"text_embeds\")\n if \"time_ids\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`\"\n )\n time_ids = added_cond_kwargs.get(\"time_ids\")\n time_embeds = self.add_time_proj(time_ids.flatten())\n time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))\n\n add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)\n add_embeds = add_embeds.to(emb.dtype)\n aug_emb = self.add_embedding(add_embeds)\n elif self.config.addition_embed_type == \"image\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`\"\n )\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n aug_emb = self.add_embedding(image_embs)\n elif 
self.config.addition_embed_type == \"image_hint\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs or \"hint\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`\"\n )\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n hint = added_cond_kwargs.get(\"hint\")\n aug_emb, hint = self.add_embedding(image_embs, hint)\n sample = torch.cat([sample, hint], dim=1)\n\n emb = emb + aug_emb if aug_emb is not None else emb\n\n if self.time_embed_act is not None:\n emb = self.time_embed_act(emb)\n\n if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"text_proj\":\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)\n elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"text_image_proj\":\n # Kadinsky 2.1 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`\"\n )\n\n image_embeds = added_cond_kwargs.get(\"image_embeds\")\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)\n elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"image_proj\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`\"\n )\n image_embeds = added_cond_kwargs.get(\"image_embeds\")\n encoder_hidden_states = self.encoder_hid_proj(image_embeds)\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 2.5 GLIGEN position net\n if cross_attention_kwargs is not None and cross_attention_kwargs.get(\"gligen\", None) is not None:\n cross_attention_kwargs = cross_attention_kwargs.copy()\n gligen_args = cross_attention_kwargs.pop(\"gligen\")\n cross_attention_kwargs[\"gligen\"] = {\"objs\": self.position_net(**gligen_args)}\n\n # 3. 
down\n lora_scale = cross_attention_kwargs.get(\"scale\", 1.0) if cross_attention_kwargs is not None else 1.0\n\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n if quick_replicate and replicate_prv_feature is not None:\n # Down\n for i, downsample_block in enumerate(self.down_blocks):\n if i > cache_layer_id:\n break\n\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n # For t2i-adapter CrossAttnDownBlock2D\n additional_residuals = {}\n if is_adapter and len(down_block_additional_residuals) > 0:\n additional_residuals[\"additional_residuals\"] = down_block_additional_residuals.pop(0)\n\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n exist_block_number=cache_block_id if i == cache_layer_id else None,\n **additional_residuals,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)\n\n if is_adapter and len(down_block_additional_residuals) > 0:\n sample += down_block_additional_residuals.pop(0)\n\n down_block_res_samples += res_samples\n\n # No Middle\n # Up\n #print(\"down_block_res_samples:\", [res_sample.shape for res_sample in down_block_res_samples])\n sample = replicate_prv_feature\n #down_block_res_samples = down_block_res_samples[:-1]\n if cache_block_id == len(self.down_blocks[cache_layer_id].attentions) :\n cache_block_id = 0\n cache_layer_id += 1\n else:\n cache_block_id += 1\n\n for i, upsample_block in enumerate(self.up_blocks):\n if i < len(self.up_blocks) - 1 - cache_layer_id:\n continue\n\n if i == len(self.up_blocks) - 1 - cache_layer_id:\n trunc_upsample_block = cache_block_id + 1\n else:\n trunc_upsample_block = len(upsample_block.resnets)\n\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-trunc_upsample_block:]\n down_block_res_samples = down_block_res_samples[: -trunc_upsample_block]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n #print(sample.shape, [res_sample.shape for res_sample in res_samples])\n sample, _ = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n enter_block_number=cache_block_id if i == len(self.up_blocks) - 1 - cache_layer_id else None,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n scale=lora_scale,\n )\n \n prv_f = replicate_prv_feature\n else:\n for i, downsample_block in enumerate(self.down_blocks):\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n # For t2i-adapter CrossAttnDownBlock2D\n additional_residuals = {}\n if is_adapter and 
len(down_block_additional_residuals) > 0:\n additional_residuals[\"additional_residuals\"] = down_block_additional_residuals.pop(0)\n\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n **additional_residuals,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)\n\n if is_adapter and len(down_block_additional_residuals) > 0:\n sample += down_block_additional_residuals.pop(0)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n )\n # To support T2I-Adapter-XL\n if (\n is_adapter\n and len(down_block_additional_residuals) > 0\n and sample.shape == down_block_additional_residuals[0].shape\n ):\n sample += down_block_additional_residuals.pop(0)\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n if cache_block_id is not None:\n if cache_block_id == len(self.down_blocks[cache_layer_id].attentions) :\n cache_block_id = 0\n cache_layer_id += 1\n else:\n cache_block_id += 1\n #print(\"down_block_res_samples:\", [res_sample.shape for res_sample in down_block_res_samples])\n #print(cache_block_id, cache_layer_id)\n prv_f = None\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n #print(sample.shape, [res_sample.shape for res_sample in res_samples])\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample, current_record_f = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n scale=lora_scale,\n )\n current_record_f = None\n\n #print(\"Append prv_feature with shape:\", sample.shape)\n if cache_layer_id is not None and current_record_f is not None and i == len(self.up_blocks) - cache_layer_id - 1:\n prv_f = current_record_f[-cache_block_id-1]\n \n # 6. 
post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n if not return_dict:\n return (sample, prv_f,)\n \n return UNet2DConditionOutput(sample=sample)" }, { "identifier": "StableDiffusionPipeline", "path": "DeepCache/sd/pipeline_stable_diffusion.py", "snippet": "EXAMPLE_DOC_STRING = \"\"\"\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionPipeline\n\n >>> pipe = StableDiffusionPipeline.from_pretrained(\"runwayml/stable-diffusion-v1-5\", torch_dtype=torch.float16)\n >>> pipe = pipe.to(\"cuda\")\n\n >>> prompt = \"a photo of an astronaut riding a horse on mars\"\n >>> image = pipe(prompt).images[0]\n ```\n\"\"\"\ndef sample_gaussian_centered(n=1000, sample_size=100, std_dev=100):\ndef sample_from_quad(total_numbers, n_samples, pow=1.2):\ndef sample_from_quad_center(total_numbers, n_samples, center, pow=1.2):\ndef rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n def enable_vae_slicing(self):\n def disable_vae_slicing(self):\n def enable_vae_tiling(self):\n def disable_vae_tiling(self):\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n lora_scale: Optional[float] = None,\n ):\n def encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n lora_scale: Optional[float] = None,\n ):\n def run_safety_checker(self, image, device, dtype):\n def decode_latents(self, latents):\n def prepare_extra_step_kwargs(self, generator, eta):\n def check_inputs(\n self,\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n ):\n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n cache_interval: int = 1,\n cache_layer_id: int = None,\n cache_block_id: int = None,\n uniform: bool = True,\n pow: float = None,\n center: int = None,\n output_all_sequence: bool = False,\n ):\nclass StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, 
FromSingleFileMixin):" } ]
import copy import numpy as np import PIL.Image import torch import torch.nn.functional as F from dataclasses import dataclass from typing import Callable, List, Optional, Union from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL from .unet_2d_condition import UNet2DConditionModel from .pipeline_stable_diffusion import StableDiffusionPipeline, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import BaseOutput from diffusers.utils.torch_utils import randn_tensor
16,961
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. """ images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. 
This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents
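The helpers above build a constant per-frame translation field and warp each frame's latent with `grid_sample`. Below is a small usage sketch, assuming the helpers defined above are in scope; the latent shape `(num_frames, 4, 64, 64)` and the strength values are illustrative assumptions.

```python
import torch

# Hypothetical usage of create_motion_field_and_warp_latents (defined above).
# 8 frames of 4x64x64 latents; the flow scales linearly with the frame id,
# so later frames are shifted further along x and y.
latents = torch.randn(8, 4, 64, 64)
frame_ids = list(range(latents.shape[0]))

warped = create_motion_field_and_warp_latents(
    motion_field_strength_x=12,
    motion_field_strength_y=12,
    frame_ids=frame_ids,
    latents=latents,
)
assert warped.shape == latents.shape  # warping preserves the latent shape
```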
def sample_gaussian_centered(n=1000, sample_size=100, std_dev=100): samples = [] while len(samples) < sample_size: # Sample from a Gaussian centered at n/2 sample = int(np.random.normal(loc=n/2, scale=std_dev)) # Check if the sample is in bounds if 1 <= sample < n and sample not in samples: samples.append(sample) return samples def sample_from_quad(total_numbers, n_samples, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace(0, total_numbers**(1/pow), n_samples+1) # Raise these values to the power of 1.5 to get a non-linear distribution indices = np.unique(np.int32(x_values**pow))[:-1] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def sample_from_quad_center(total_numbers, n_samples, center, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace((-center)**(1/pow), (total_numbers-center)**(1/pow), n_samples+1) indices = [0] + [x+center for x in np.unique(np.int32(x_values**pow))[1:-1]] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. 
""" images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents
class TextToVideoZeroPipeline(StableDiffusionPipeline):
1
2023-12-01 10:54:04+00:00
24k
alvinliu0/HumanGaussian
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = 
self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

import threestudio
import trimesh
from dataclasses import dataclass, field

from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *

from pysdf import SDF
14932
if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)
2
2023-11-27 02:39:39+00:00
24k
EricGuo5513/momask-codes
gen_t2m.py
[ { "identifier": "MaskTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class MaskTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8,\n num_heads=4, dropout=0.1, clip_dim=512, cond_drop_prob=0.1,\n clip_version=None, opt=None, **kargs):\n super(MaskTransformer, self).__init__()\n print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}')\n\n self.code_dim = code_dim\n self.latent_dim = latent_dim\n self.clip_dim = clip_dim\n self.dropout = dropout\n self.opt = opt\n\n self.cond_mode = cond_mode\n self.cond_drop_prob = cond_drop_prob\n\n if self.cond_mode == 'action':\n assert 'num_actions' in kargs\n self.num_actions = kargs.get('num_actions', 1)\n\n '''\n Preparing Networks\n '''\n self.input_process = InputProcess(self.code_dim, self.latent_dim)\n self.position_enc = PositionalEncoding(self.latent_dim, self.dropout)\n\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=num_heads,\n dim_feedforward=ff_size,\n dropout=dropout,\n activation='gelu')\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=num_layers)\n\n self.encode_action = partial(F.one_hot, num_classes=self.num_actions)\n\n # if self.cond_mode != 'no_cond':\n if self.cond_mode == 'text':\n self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim)\n elif self.cond_mode == 'action':\n self.cond_emb = nn.Linear(self.num_actions, self.latent_dim)\n elif self.cond_mode == 'uncond':\n self.cond_emb = nn.Identity()\n else:\n raise KeyError(\"Unsupported condition mode!!!\")\n\n\n _num_tokens = opt.num_tokens + 2 # two dummy tokens, one for masking, one for padding\n self.mask_id = opt.num_tokens\n self.pad_id = opt.num_tokens + 1\n\n self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim)\n\n self.token_emb = nn.Embedding(_num_tokens, self.code_dim)\n\n self.apply(self.__init_weights)\n\n '''\n Preparing frozen weights\n '''\n\n if self.cond_mode == 'text':\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n self.noise_schedule = cosine_schedule\n\n def load_and_freeze_token_emb(self, codebook):\n '''\n :param codebook: (c, d)\n :return:\n '''\n assert self.training, 'Only necessary in training mode'\n c, d = codebook.shape\n self.token_emb.weight = nn.Parameter(torch.cat([codebook, torch.zeros(size=(2, d), device=codebook.device)], dim=0)) #add two dummy tokens, 0 vectors\n self.token_emb.requires_grad_(False)\n # self.token_emb.weight.requires_grad = False\n # self.token_emb_ready = True\n print(\"Token embedding initialized!\")\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n # Cannot run on cpu\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n # Date 0707: It's necessary, only 
unecessary when load directly to gpu. Disable if need to run on cpu\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def encode_text(self, raw_text):\n device = next(self.parameters()).device\n text = clip.tokenize(raw_text, truncate=True).to(device)\n feat_clip_text = self.clip_model.encode_text(text).float()\n return feat_clip_text\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_drop_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_drop_prob).view(bs, 1)\n return cond * (1. - mask)\n else:\n return cond\n\n def trans_forward(self, motion_ids, cond, padding_mask, force_mask=False):\n '''\n :param motion_ids: (b, seqlen)\n :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE\n :param cond: (b, embed_dim) for text, (b, num_actions) for action\n :param force_mask: boolean\n :return:\n -logits: (b, num_token, seqlen)\n '''\n\n cond = self.mask_cond(cond, force_mask=force_mask)\n\n # print(motion_ids.shape)\n x = self.token_emb(motion_ids)\n # print(x.shape)\n # (b, seqlen, d) -> (seqlen, b, latent_dim)\n x = self.input_process(x)\n\n cond = self.cond_emb(cond).unsqueeze(0) #(1, b, latent_dim)\n\n x = self.position_enc(x)\n xseq = torch.cat([cond, x], dim=0) #(seqlen+1, b, latent_dim)\n\n padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:1]), padding_mask], dim=1) #(b, seqlen+1)\n # print(xseq.shape, padding_mask.shape)\n\n # print(padding_mask.shape, xseq.shape)\n\n output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[1:] #(seqlen, b, e)\n logits = self.output_process(output) #(seqlen, b, e) -> (b, ntoken, seqlen)\n return logits\n\n def forward(self, ids, y, m_lens):\n '''\n :param ids: (b, n)\n :param y: raw text for cond_mode=text, (b, ) for cond_mode=action\n :m_lens: (b,)\n :return:\n '''\n\n bs, ntokens = ids.shape\n device = ids.device\n\n # Positions that are PADDED are ALL FALSE\n non_pad_mask = lengths_to_mask(m_lens, ntokens) #(b, n)\n ids = torch.where(non_pad_mask, ids, self.pad_id)\n\n force_mask = False\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(y)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(y).to(device).float()\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)\n force_mask = True\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n\n '''\n Prepare mask\n '''\n rand_time = uniform((bs,), device=device)\n rand_mask_probs = self.noise_schedule(rand_time)\n num_token_masked = (ntokens * rand_mask_probs).round().clamp(min=1)\n\n batch_randperm = torch.rand((bs, ntokens), device=device).argsort(dim=-1)\n # Positions to be MASKED are ALL TRUE\n mask = batch_randperm < num_token_masked.unsqueeze(-1)\n\n # Positions to be MASKED must also be NON-PADDED\n mask &= non_pad_mask\n\n # Note this is our training target, not input\n labels = torch.where(mask, ids, self.mask_id)\n\n x_ids = ids.clone()\n\n # Further Apply Bert Masking Scheme\n # Step 1: 10% replace with an incorrect token\n mask_rid = get_mask_subset_prob(mask, 0.1)\n rand_id = torch.randint_like(x_ids, high=self.opt.num_tokens)\n x_ids = torch.where(mask_rid, rand_id, x_ids)\n # Step 2: 90% x 10% replace with correct token, and 90% x 88% replace with mask token\n mask_mid = get_mask_subset_prob(mask & ~mask_rid, 0.88)\n\n 
# mask_mid = mask\n\n x_ids = torch.where(mask_mid, self.mask_id, x_ids)\n\n logits = self.trans_forward(x_ids, cond_vector, ~non_pad_mask, force_mask)\n ce_loss, pred_id, acc = cal_performance(logits, labels, ignore_index=self.mask_id)\n\n return ce_loss, pred_id, acc\n\n def forward_with_cond_scale(self,\n motion_ids,\n cond_vector,\n padding_mask,\n cond_scale=3,\n force_mask=False):\n # bs = motion_ids.shape[0]\n # if cond_scale == 1:\n if force_mask:\n return self.trans_forward(motion_ids, cond_vector, padding_mask, force_mask=True)\n\n logits = self.trans_forward(motion_ids, cond_vector, padding_mask)\n if cond_scale == 1:\n return logits\n\n aux_logits = self.trans_forward(motion_ids, cond_vector, padding_mask, force_mask=True)\n\n scaled_logits = aux_logits + (logits - aux_logits) * cond_scale\n return scaled_logits\n\n @torch.no_grad()\n @eval_decorator\n def generate(self,\n conds,\n m_lens,\n timesteps: int,\n cond_scale: int,\n temperature=1,\n topk_filter_thres=0.9,\n gsample=False,\n force_mask=False\n ):\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n\n device = next(self.parameters()).device\n seq_len = max(m_lens)\n batch_size = len(m_lens)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, )\n\n # Start from all tokens being masked\n ids = torch.where(padding_mask, self.pad_id, self.mask_id)\n scores = torch.where(padding_mask, 1e5, 0.)\n starting_temperature = temperature\n\n for timestep, steps_until_x0 in zip(torch.linspace(0, 1, timesteps, device=device), reversed(range(timesteps))):\n # 0 < timestep < 1\n rand_mask_prob = self.noise_schedule(timestep) # Tensor\n\n '''\n Maskout, and cope with variable length\n '''\n # fix: the ratio regarding lengths, instead of seq_len\n num_token_masked = torch.round(rand_mask_prob * m_lens).clamp(min=1) # (b, )\n\n # select num_token_masked tokens with lowest scores to be masked\n sorted_indices = scores.argsort(\n dim=1) # (b, k), sorted_indices[i, j] = the index of j-th lowest element in scores on dim=1\n ranks = sorted_indices.argsort(dim=1) # (b, k), rank[i, j] = the rank (0: lowest) of scores[i, j] on dim=1\n is_mask = (ranks < num_token_masked.unsqueeze(-1))\n ids = torch.where(is_mask, self.mask_id, ids)\n\n '''\n Preparing input\n '''\n # (b, num_token, seqlen)\n logits = self.forward_with_cond_scale(ids, cond_vector=cond_vector,\n padding_mask=padding_mask,\n cond_scale=cond_scale,\n force_mask=force_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # print(logits.shape, self.opt.num_tokens)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n '''\n Update ids\n '''\n # if force_mask:\n temperature = starting_temperature\n # else:\n # temperature = starting_temperature * (steps_until_x0 / timesteps)\n # temperature = max(temperature, 1e-4)\n # print(filtered_logits.shape)\n # temperature is annealed, gradually reducing temperature as well as randomness\n if gsample: # use gumbel_softmax sampling\n # print(\"1111\")\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n else: # use 
multinomial sampling\n # print(\"2222\")\n probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # print(probs / temperature)\n pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n # print(pred_ids.max(), pred_ids.min())\n # if pred_ids.\n ids = torch.where(is_mask, pred_ids, ids)\n\n '''\n Updating scores\n '''\n probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken)\n scores = probs_without_temperature.gather(2, pred_ids.unsqueeze(dim=-1)) # (b, seqlen, 1)\n scores = scores.squeeze(-1) # (b, seqlen)\n\n # We do not want to re-mask the previously kept tokens, or pad tokens\n scores = scores.masked_fill(~is_mask, 1e5)\n\n ids = torch.where(padding_mask, -1, ids)\n # print(\"Final\", ids.max(), ids.min())\n return ids\n\n\n @torch.no_grad()\n @eval_decorator\n def edit(self,\n conds,\n tokens,\n m_lens,\n timesteps: int,\n cond_scale: int,\n temperature=1,\n topk_filter_thres=0.9,\n gsample=False,\n force_mask=False,\n edit_mask=None,\n padding_mask=None,\n ):\n\n assert edit_mask.shape == tokens.shape if edit_mask is not None else True\n device = next(self.parameters()).device\n seq_len = tokens.shape[1]\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(1, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n if padding_mask == None:\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n\n # Start from all tokens being masked\n if edit_mask == None:\n mask_free = True\n ids = torch.where(padding_mask, self.pad_id, tokens)\n edit_mask = torch.ones_like(padding_mask)\n edit_mask = edit_mask & ~padding_mask\n edit_len = edit_mask.sum(dim=-1)\n scores = torch.where(edit_mask, 0., 1e5)\n else:\n mask_free = False\n edit_mask = edit_mask & ~padding_mask\n edit_len = edit_mask.sum(dim=-1)\n ids = torch.where(edit_mask, self.mask_id, tokens)\n scores = torch.where(edit_mask, 0., 1e5)\n starting_temperature = temperature\n\n for timestep, steps_until_x0 in zip(torch.linspace(0, 1, timesteps, device=device), reversed(range(timesteps))):\n # 0 < timestep < 1\n rand_mask_prob = 0.16 if mask_free else self.noise_schedule(timestep) # Tensor\n\n '''\n Maskout, and cope with variable length\n '''\n # fix: the ratio regarding lengths, instead of seq_len\n num_token_masked = torch.round(rand_mask_prob * edit_len).clamp(min=1) # (b, )\n\n # select num_token_masked tokens with lowest scores to be masked\n sorted_indices = scores.argsort(\n dim=1) # (b, k), sorted_indices[i, j] = the index of j-th lowest element in scores on dim=1\n ranks = sorted_indices.argsort(dim=1) # (b, k), rank[i, j] = the rank (0: lowest) of scores[i, j] on dim=1\n is_mask = (ranks < num_token_masked.unsqueeze(-1))\n # is_mask = (torch.rand_like(scores) < 0.8) * ~padding_mask if mask_free else is_mask\n ids = torch.where(is_mask, self.mask_id, ids)\n\n '''\n Preparing input\n '''\n # (b, num_token, seqlen)\n logits = self.forward_with_cond_scale(ids, cond_vector=cond_vector,\n padding_mask=padding_mask,\n cond_scale=cond_scale,\n force_mask=force_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # print(logits.shape, self.opt.num_tokens)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n '''\n Update ids\n '''\n # if 
force_mask:\n temperature = starting_temperature\n # else:\n # temperature = starting_temperature * (steps_until_x0 / timesteps)\n # temperature = max(temperature, 1e-4)\n # print(filtered_logits.shape)\n # temperature is annealed, gradually reducing temperature as well as randomness\n if gsample: # use gumbel_softmax sampling\n # print(\"1111\")\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n else: # use multinomial sampling\n # print(\"2222\")\n probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # print(probs / temperature)\n pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n # print(pred_ids.max(), pred_ids.min())\n # if pred_ids.\n ids = torch.where(is_mask, pred_ids, ids)\n\n '''\n Updating scores\n '''\n probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken)\n scores = probs_without_temperature.gather(2, pred_ids.unsqueeze(dim=-1)) # (b, seqlen, 1)\n scores = scores.squeeze(-1) # (b, seqlen)\n\n # We do not want to re-mask the previously kept tokens, or pad tokens\n scores = scores.masked_fill(~edit_mask, 1e5) if mask_free else scores.masked_fill(~is_mask, 1e5)\n\n ids = torch.where(padding_mask, -1, ids)\n # print(\"Final\", ids.max(), ids.min())\n return ids\n\n @torch.no_grad()\n @eval_decorator\n def edit_beta(self,\n conds,\n conds_og,\n tokens,\n m_lens,\n cond_scale: int,\n force_mask=False,\n ):\n\n device = next(self.parameters()).device\n seq_len = tokens.shape[1]\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n if conds_og is not None:\n cond_vector_og = self.encode_text(conds_og)\n else:\n cond_vector_og = None\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n if conds_og is not None:\n cond_vector_og = self.enc_action(conds_og).to(device)\n else:\n cond_vector_og = None\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n\n # Start from all tokens being masked\n ids = torch.where(padding_mask, self.pad_id, tokens) # Do not mask anything\n\n '''\n Preparing input\n '''\n # (b, num_token, seqlen)\n logits = self.forward_with_cond_scale(ids,\n cond_vector=cond_vector,\n cond_vector_neg=cond_vector_og,\n padding_mask=padding_mask,\n cond_scale=cond_scale,\n force_mask=force_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n\n '''\n Updating scores\n '''\n probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken)\n tokens[tokens == -1] = 0 # just to get through an error when index = -1 using gather\n og_tokens_scores = probs_without_temperature.gather(2, tokens.unsqueeze(dim=-1)) # (b, seqlen, 1)\n og_tokens_scores = og_tokens_scores.squeeze(-1) # (b, seqlen)\n\n return og_tokens_scores" }, { "identifier": "ResidualTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class ResidualTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1,\n num_heads=4, dropout=0.1, clip_dim=512, shared_codebook=False, share_weight=False,\n clip_version=None, opt=None, **kargs):\n super(ResidualTransformer, self).__init__()\n print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}')\n\n # assert shared_codebook == True, \"Only support shared codebook right now!\"\n\n self.code_dim = 
code_dim\n self.latent_dim = latent_dim\n self.clip_dim = clip_dim\n self.dropout = dropout\n self.opt = opt\n\n self.cond_mode = cond_mode\n # self.cond_drop_prob = cond_drop_prob\n\n if self.cond_mode == 'action':\n assert 'num_actions' in kargs\n self.num_actions = kargs.get('num_actions', 1)\n self.cond_drop_prob = cond_drop_prob\n\n '''\n Preparing Networks\n '''\n self.input_process = InputProcess(self.code_dim, self.latent_dim)\n self.position_enc = PositionalEncoding(self.latent_dim, self.dropout)\n\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=num_heads,\n dim_feedforward=ff_size,\n dropout=dropout,\n activation='gelu')\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=num_layers)\n\n self.encode_quant = partial(F.one_hot, num_classes=self.opt.num_quantizers)\n self.encode_action = partial(F.one_hot, num_classes=self.num_actions)\n\n self.quant_emb = nn.Linear(self.opt.num_quantizers, self.latent_dim)\n # if self.cond_mode != 'no_cond':\n if self.cond_mode == 'text':\n self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim)\n elif self.cond_mode == 'action':\n self.cond_emb = nn.Linear(self.num_actions, self.latent_dim)\n else:\n raise KeyError(\"Unsupported condition mode!!!\")\n\n\n _num_tokens = opt.num_tokens + 1 # one dummy tokens for padding\n self.pad_id = opt.num_tokens\n\n # self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim)\n self.output_process = OutputProcess(out_feats=code_dim, latent_dim=latent_dim)\n\n if shared_codebook:\n token_embed = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n self.token_embed_weight = token_embed.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n if share_weight:\n self.output_proj_weight = self.token_embed_weight\n self.output_proj_bias = None\n else:\n output_proj = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n output_bias = nn.Parameter(torch.zeros(size=(_num_tokens,)))\n # self.output_proj_bias = 0\n self.output_proj_weight = output_proj.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n self.output_proj_bias = output_bias.expand(opt.num_quantizers-1, _num_tokens)\n\n else:\n if share_weight:\n self.embed_proj_shared_weight = nn.Parameter(torch.normal(mean=0, std=0.02, size=(opt.num_quantizers - 2, _num_tokens, code_dim)))\n self.token_embed_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_bias = None\n self.registered = False\n else:\n output_proj_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n\n self.output_proj_weight = nn.Parameter(output_proj_weight)\n self.output_proj_bias = nn.Parameter(torch.zeros(size=(opt.num_quantizers, _num_tokens)))\n token_embed_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n self.token_embed_weight = nn.Parameter(token_embed_weight)\n\n self.apply(self.__init_weights)\n self.shared_codebook = shared_codebook\n self.share_weight = share_weight\n\n if self.cond_mode == 'text':\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n # def\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_drop_prob > 
0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_drop_prob).view(bs, 1)\n return cond * (1. - mask)\n else:\n return cond\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n # Cannot run on cpu\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n # Date 0707: It's necessary, only unecessary when load directly to gpu. Disable if need to run on cpu\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def encode_text(self, raw_text):\n device = next(self.parameters()).device\n text = clip.tokenize(raw_text, truncate=True).to(device)\n feat_clip_text = self.clip_model.encode_text(text).float()\n return feat_clip_text\n\n\n def q_schedule(self, bs, low, high):\n noise = uniform((bs,), device=self.opt.device)\n schedule = 1 - cosine_schedule(noise)\n return torch.round(schedule * (high - low)) + low\n\n def process_embed_proj_weight(self):\n if self.share_weight and (not self.shared_codebook):\n # if not self.registered:\n self.output_proj_weight = torch.cat([self.embed_proj_shared_weight, self.output_proj_weight_], dim=0)\n self.token_embed_weight = torch.cat([self.token_embed_weight_, self.embed_proj_shared_weight], dim=0)\n # self.registered = True\n\n def output_project(self, logits, qids):\n '''\n :logits: (bs, code_dim, seqlen)\n :qids: (bs)\n\n :return:\n -logits (bs, ntoken, seqlen)\n '''\n # (num_qlayers-1, num_token, code_dim) -> (bs, ntoken, code_dim)\n output_proj_weight = self.output_proj_weight[qids]\n # (num_qlayers, ntoken) -> (bs, ntoken)\n output_proj_bias = None if self.output_proj_bias is None else self.output_proj_bias[qids]\n\n output = torch.einsum('bnc, bcs->bns', output_proj_weight, logits)\n if output_proj_bias is not None:\n output += output + output_proj_bias.unsqueeze(-1)\n return output\n\n\n\n def trans_forward(self, motion_codes, qids, cond, padding_mask, force_mask=False):\n '''\n :param motion_codes: (b, seqlen, d)\n :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE\n :param qids: (b), quantizer layer ids\n :param cond: (b, embed_dim) for text, (b, num_actions) for action\n :return:\n -logits: (b, num_token, seqlen)\n '''\n cond = self.mask_cond(cond, force_mask=force_mask)\n\n # (b, seqlen, d) -> (seqlen, b, latent_dim)\n x = self.input_process(motion_codes)\n\n # (b, num_quantizer)\n q_onehot = self.encode_quant(qids).float().to(x.device)\n\n q_emb = self.quant_emb(q_onehot).unsqueeze(0) # (1, b, latent_dim)\n cond = self.cond_emb(cond).unsqueeze(0) # (1, b, latent_dim)\n\n x = self.position_enc(x)\n xseq = torch.cat([cond, q_emb, x], dim=0) # (seqlen+2, b, latent_dim)\n\n padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:2]), padding_mask], dim=1) # (b, seqlen+2)\n output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[2:] # (seqlen, b, e)\n logits = self.output_process(output)\n 
return logits\n\n def forward_with_cond_scale(self,\n motion_codes,\n q_id,\n cond_vector,\n padding_mask,\n cond_scale=3,\n force_mask=False):\n bs = motion_codes.shape[0]\n # if cond_scale == 1:\n qids = torch.full((bs,), q_id, dtype=torch.long, device=motion_codes.device)\n if force_mask:\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n logits = self.output_project(logits, qids-1)\n return logits\n\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask)\n logits = self.output_project(logits, qids-1)\n if cond_scale == 1:\n return logits\n\n aux_logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n aux_logits = self.output_project(aux_logits, qids-1)\n\n scaled_logits = aux_logits + (logits - aux_logits) * cond_scale\n return scaled_logits\n\n def forward(self, all_indices, y, m_lens):\n '''\n :param all_indices: (b, n, q)\n :param y: raw text for cond_mode=text, (b, ) for cond_mode=action\n :m_lens: (b,)\n :return:\n '''\n\n self.process_embed_proj_weight()\n\n bs, ntokens, num_quant_layers = all_indices.shape\n device = all_indices.device\n\n # Positions that are PADDED are ALL FALSE\n non_pad_mask = lengths_to_mask(m_lens, ntokens) # (b, n)\n\n q_non_pad_mask = repeat(non_pad_mask, 'b n -> b n q', q=num_quant_layers)\n all_indices = torch.where(q_non_pad_mask, all_indices, self.pad_id) #(b, n, q)\n\n # randomly sample quantization layers to work on, [1, num_q)\n active_q_layers = q_schedule(bs, low=1, high=num_quant_layers, device=device)\n\n # print(self.token_embed_weight.shape, all_indices.shape)\n token_embed = repeat(self.token_embed_weight, 'q c d-> b c d q', b=bs)\n gather_indices = repeat(all_indices[..., :-1], 'b n q -> b n d q', d=token_embed.shape[2])\n # print(token_embed.shape, gather_indices.shape)\n all_codes = token_embed.gather(1, gather_indices) # (b, n, d, q-1)\n\n cumsum_codes = torch.cumsum(all_codes, dim=-1) #(b, n, d, q-1)\n\n active_indices = all_indices[torch.arange(bs), :, active_q_layers] # (b, n)\n history_sum = cumsum_codes[torch.arange(bs), :, :, active_q_layers - 1]\n\n force_mask = False\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(y)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(y).to(device).float()\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)\n force_mask = True\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n logits = self.trans_forward(history_sum, active_q_layers, cond_vector, ~non_pad_mask, force_mask)\n logits = self.output_project(logits, active_q_layers-1)\n ce_loss, pred_id, acc = cal_performance(logits, active_indices, ignore_index=self.pad_id)\n\n return ce_loss, pred_id, acc\n\n @torch.no_grad()\n @eval_decorator\n def generate(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2,\n num_res_layers=-1, # If it's -1, use all.\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, 
self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n num_quant_layers = self.opt.num_quantizers if num_res_layers==-1 else num_res_layers+1\n\n for i in range(1, num_quant_layers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices\n\n @torch.no_grad()\n @eval_decorator\n def edit(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n\n for i in range(1, self.opt.num_quantizers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being 
masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices" }, { "identifier": "RVQVAE", "path": "models/vq/model.py", "snippet": "class RVQVAE(nn.Module):\n def __init__(self,\n args,\n input_width=263,\n nb_code=1024,\n code_dim=512,\n output_emb_width=512,\n down_t=3,\n stride_t=2,\n width=512,\n depth=3,\n dilation_growth_rate=3,\n activation='relu',\n norm=None):\n\n super().__init__()\n assert output_emb_width == code_dim\n self.code_dim = code_dim\n self.num_code = nb_code\n # self.quant = args.quantizer\n self.encoder = Encoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n self.decoder = Decoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n rvqvae_config = {\n 'num_quantizers': args.num_quantizers,\n 'shared_codebook': args.shared_codebook,\n 'quantize_dropout_prob': args.quantize_dropout_prob,\n 'quantize_dropout_cutoff_index': 0,\n 'nb_code': nb_code,\n 'code_dim':code_dim, \n 'args': args,\n }\n self.quantizer = ResidualVQ(**rvqvae_config)\n\n def preprocess(self, x):\n # (bs, T, Jx3) -> (bs, Jx3, T)\n x = x.permute(0, 2, 1).float()\n return x\n\n def postprocess(self, x):\n # (bs, Jx3, T) -> (bs, T, Jx3)\n x = x.permute(0, 2, 1)\n return x\n\n def encode(self, x):\n N, T, _ = x.shape\n x_in = self.preprocess(x)\n x_encoder = self.encoder(x_in)\n # print(x_encoder.shape)\n code_idx, all_codes = self.quantizer.quantize(x_encoder, return_latent=True)\n # print(code_idx.shape)\n # code_idx = code_idx.view(N, -1)\n # (N, T, Q)\n # print()\n return code_idx, all_codes\n\n def forward(self, x):\n x_in = self.preprocess(x)\n # Encode\n x_encoder = self.encoder(x_in)\n\n ## quantization\n # x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5,\n # force_dropout_index=0) #TODO hardcode\n x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5)\n\n # print(code_idx[0, :, 1])\n ## decoder\n x_out = self.decoder(x_quantized)\n # x_out = self.postprocess(x_decoder)\n return x_out, commit_loss, 
perplexity\n\n def forward_decoder(self, x):\n x_d = self.quantizer.get_codes_from_indices(x)\n # x_d = x_d.view(1, -1, self.code_dim).permute(0, 2, 1).contiguous()\n x = x_d.sum(dim=0).permute(0, 2, 1)\n\n # decoder\n x_out = self.decoder(x)\n # x_out = self.postprocess(x_decoder)\n return x_out" }, { "identifier": "LengthEstimator", "path": "models/vq/model.py", "snippet": "class LengthEstimator(nn.Module):\n def __init__(self, input_size, output_size):\n super(LengthEstimator, self).__init__()\n nd = 512\n self.output = nn.Sequential(\n nn.Linear(input_size, nd),\n nn.LayerNorm(nd),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Dropout(0.2),\n nn.Linear(nd, nd // 2),\n nn.LayerNorm(nd // 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Dropout(0.2),\n nn.Linear(nd // 2, nd // 4),\n nn.LayerNorm(nd // 4),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Linear(nd // 4, output_size)\n )\n\n self.output.apply(self.__init_weights)\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def forward(self, text_emb):\n return self.output(text_emb)" }, { "identifier": "EvalT2MOptions", "path": "options/eval_option.py", "snippet": "class EvalT2MOptions(BaseOptions):\n def initialize(self):\n BaseOptions.initialize(self)\n self.parser.add_argument('--which_epoch', type=str, default=\"latest\", help='Checkpoint you want to use, {latest, net_best_fid, etc}')\n self.parser.add_argument('--batch_size', type=int, default=32, help='Batch size')\n\n self.parser.add_argument('--ext', type=str, default='text2motion', help='Extension of the result file or folder')\n self.parser.add_argument(\"--num_batch\", default=2, type=int,\n help=\"Number of batch for generation\")\n self.parser.add_argument(\"--repeat_times\", default=1, type=int,\n help=\"Number of repetitions, per sample text prompt\")\n self.parser.add_argument(\"--cond_scale\", default=4, type=float,\n help=\"For classifier-free sampling - specifies the s parameter, as defined in the paper.\")\n self.parser.add_argument(\"--temperature\", default=1., type=float,\n help=\"Sampling Temperature.\")\n self.parser.add_argument(\"--topkr\", default=0.9, type=float,\n help=\"Filter out percentil low prop entries.\")\n self.parser.add_argument(\"--time_steps\", default=18, type=int,\n help=\"Mask Generate steps.\")\n self.parser.add_argument(\"--seed\", default=10107, type=int)\n\n self.parser.add_argument('--gumbel_sample', action=\"store_true\", help='True: gumbel sampling, False: categorical sampling.')\n self.parser.add_argument('--use_res_model', action=\"store_true\", help='Whether to use residual transformer.')\n # self.parser.add_argument('--est_length', action=\"store_true\", help='Training iterations')\n\n self.parser.add_argument('--res_name', type=str, default='tres_nlayer8_ld384_ff1024_rvq6ns_cdp0.2_sw', help='Model name of residual transformer')\n self.parser.add_argument('--text_path', type=str, default=\"\", help='Text prompt file')\n\n\n self.parser.add_argument('-msec', '--mask_edit_section', nargs='*', type=str, help='Indicate sections for editing, use comma to separate the start and end of a section'\n 'type int will specify the token frame, type float will specify the ratio of seq_len')\n self.parser.add_argument('--text_prompt', default='', type=str, help=\"A text prompt to 
be generated. If empty, will take text prompts from dataset.\")\n self.parser.add_argument('--source_motion', default='example_data/000612.npy', type=str, help=\"Source motion path for editing. (new_joint_vecs format .npy file)\")\n self.parser.add_argument(\"--motion_length\", default=0, type=int,\n help=\"Motion length for generation, only applicable with single text prompt.\")\n self.is_train = False" }, { "identifier": "get_opt", "path": "utils/get_opt.py", "snippet": "def get_opt(opt_path, device, **kwargs):\n opt = Namespace()\n opt_dict = vars(opt)\n\n skip = ('-------------- End ----------------',\n '------------ Options -------------',\n '\\n')\n print('Reading', opt_path)\n with open(opt_path, 'r') as f:\n for line in f:\n if line.strip() not in skip:\n # print(line.strip())\n key, value = line.strip('\\n').split(': ')\n if value in ('True', 'False'):\n opt_dict[key] = (value == 'True')\n # print(key, value)\n elif is_float(value):\n opt_dict[key] = float(value)\n elif is_number(value):\n opt_dict[key] = int(value)\n else:\n opt_dict[key] = str(value)\n\n # print(opt)\n opt_dict['which_epoch'] = 'finest'\n opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)\n opt.model_dir = pjoin(opt.save_root, 'model')\n opt.meta_dir = pjoin(opt.save_root, 'meta')\n\n if opt.dataset_name == 't2m':\n opt.data_root = './dataset/HumanML3D/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 22\n opt.dim_pose = 263\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n elif opt.dataset_name == 'kit':\n opt.data_root = './dataset/KIT-ML/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 21\n opt.dim_pose = 251\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n else:\n raise KeyError('Dataset not recognized')\n if not hasattr(opt, 'unit_length'):\n opt.unit_length = 4\n opt.dim_word = 300\n opt.num_classes = 200 // opt.unit_length\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.is_train = False\n opt.is_continue = False\n opt.device = device\n\n opt_dict.update(kwargs) # Overwrite with kwargs params\n\n return opt" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "Joint2BVHConvertor", "path": "visualization/joints2bvh.py", "snippet": "class Joint2BVHConvertor:\n def __init__(self):\n self.template = BVH.load('./visualization/data/template.bvh', need_quater=True)\n self.re_order = [0, 1, 4, 7, 10, 2, 5, 8, 11, 3, 6, 9, 12, 15, 13, 16, 18, 20, 14, 17, 19, 21]\n\n self.re_order_inv = [0, 1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12, 14, 18, 13, 15, 19, 16, 20, 17, 21]\n self.end_points = [4, 8, 13, 17, 21]\n\n self.template_offset = self.template.offsets.copy()\n self.parents = [-1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 11, 18, 19, 20]\n\n def convert(self, positions, filename, iterations=10, foot_ik=True):\n '''\n Convert the SMPL joint positions to Mocap BVH\n :param positions: (N, 22, 3)\n :param filename: Save path for resulting BVH\n :param iterations: iterations for optimizing rotations, 10 is usually enough\n :param foot_ik: whether to enfore foot inverse kinematics, removing foot slide issue.\n :return:\n '''\n positions = positions[:, self.re_order]\n new_anim = 
self.template.copy()\n new_anim.rotations = Quaternions.id(positions.shape[:-1])\n new_anim.positions = new_anim.positions[0:1].repeat(positions.shape[0], axis=-0)\n new_anim.positions[:, 0] = positions[:, 0]\n\n if foot_ik:\n positions = remove_fs(positions, None, fid_l=(3, 4), fid_r=(7, 8), interp_length=5,\n force_on_floor=True)\n ik_solver = BasicInverseKinematics(new_anim, positions, iterations=iterations, silent=True)\n new_anim = ik_solver()\n\n # BVH.save(filename, new_anim, names=new_anim.names, frametime=1 / 20, order='zyx', quater=True)\n glb = Animation.positions_global(new_anim)[:, self.re_order_inv]\n if filename is not None:\n BVH.save(filename, new_anim, names=new_anim.names, frametime=1 / 20, order='zyx', quater=True)\n return new_anim, glb\n\n def convert_sgd(self, positions, filename, iterations=100, foot_ik=True):\n '''\n Convert the SMPL joint positions to Mocap BVH\n\n :param positions: (N, 22, 3)\n :param filename: Save path for resulting BVH\n :param iterations: iterations for optimizing rotations, 10 is usually enough\n :param foot_ik: whether to enfore foot inverse kinematics, removing foot slide issue.\n :return:\n '''\n\n ## Positional Foot locking ##\n glb = positions[:, self.re_order]\n\n if foot_ik:\n glb = remove_fs(glb, None, fid_l=(3, 4), fid_r=(7, 8), interp_length=2,\n force_on_floor=True)\n\n ## Fit BVH ##\n new_anim = self.template.copy()\n new_anim.rotations = Quaternions.id(glb.shape[:-1])\n new_anim.positions = new_anim.positions[0:1].repeat(glb.shape[0], axis=-0)\n new_anim.positions[:, 0] = glb[:, 0]\n anim = new_anim.copy()\n\n rot = torch.tensor(anim.rotations.qs, dtype=torch.float)\n pos = torch.tensor(anim.positions[:, 0, :], dtype=torch.float)\n offset = torch.tensor(anim.offsets, dtype=torch.float)\n\n glb = torch.tensor(glb, dtype=torch.float)\n ik_solver = InverseKinematics(rot, pos, offset, anim.parents, glb)\n print('Fixing foot contact using IK...')\n for i in tqdm(range(iterations)):\n mse = ik_solver.step()\n # print(i, mse)\n\n rotations = ik_solver.rotations.detach().cpu()\n norm = torch.norm(rotations, dim=-1, keepdim=True)\n rotations /= norm\n\n anim.rotations = Quaternions(rotations.numpy())\n anim.rotations[:, self.end_points] = Quaternions.id((anim.rotations.shape[0], len(self.end_points)))\n anim.positions[:, 0, :] = ik_solver.position.detach().cpu().numpy()\n if filename is not None:\n BVH.save(filename, anim, names=new_anim.names, frametime=1 / 20, order='zyx', quater=True)\n # BVH.save(filename[:-3] + 'bvh', anim, names=new_anim.names, frametime=1 / 20, order='zyx', quater=True)\n glb = Animation.positions_global(anim)[:, self.re_order_inv]\n return anim, glb" }, { "identifier": "recover_from_ric", "path": "utils/motion_process.py", "snippet": "def recover_from_ric(data, joints_num):\n r_rot_quat, r_pos = recover_root_rot_pos(data)\n positions = data[..., 4:(joints_num - 1) * 3 + 4]\n positions = positions.view(positions.shape[:-1] + (-1, 3))\n\n '''Add Y-axis rotation to local joints'''\n positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)\n\n '''Add root XZ to joints'''\n positions[..., 0] += r_pos[..., 0:1]\n positions[..., 2] += r_pos[..., 2:3]\n\n '''Concate root and joints'''\n positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)\n\n return positions" }, { "identifier": "plot_3d_motion", "path": "utils/plot_script.py", "snippet": "def plot_3d_motion(save_path, kinematic_tree, joints, title, figsize=(10, 10), fps=120, radius=4):\n matplotlib.use('Agg')\n\n 
title_sp = title.split(' ')\n if len(title_sp) > 20:\n title = '\\n'.join([' '.join(title_sp[:10]), ' '.join(title_sp[10:20]), ' '.join(title_sp[20:])])\n elif len(title_sp) > 10:\n title = '\\n'.join([' '.join(title_sp[:10]), ' '.join(title_sp[10:])])\n\n def init():\n ax.set_xlim3d([-radius / 2, radius / 2])\n ax.set_ylim3d([0, radius])\n ax.set_zlim3d([0, radius])\n # print(title)\n fig.suptitle(title, fontsize=20)\n ax.grid(b=False)\n\n def plot_xzPlane(minx, maxx, miny, minz, maxz):\n ## Plot a plane XZ\n verts = [\n [minx, miny, minz],\n [minx, miny, maxz],\n [maxx, miny, maxz],\n [maxx, miny, minz]\n ]\n xz_plane = Poly3DCollection([verts])\n xz_plane.set_facecolor((0.5, 0.5, 0.5, 0.5))\n ax.add_collection3d(xz_plane)\n\n # return ax\n\n # (seq_len, joints_num, 3)\n data = joints.copy().reshape(len(joints), -1, 3)\n fig = plt.figure(figsize=figsize)\n ax = p3.Axes3D(fig)\n init()\n MINS = data.min(axis=0).min(axis=0)\n MAXS = data.max(axis=0).max(axis=0)\n colors = ['red', 'blue', 'black', 'red', 'blue',\n 'darkblue', 'darkblue', 'darkblue', 'darkblue', 'darkblue',\n 'darkred', 'darkred', 'darkred', 'darkred', 'darkred']\n frame_number = data.shape[0]\n # print(data.shape)\n\n height_offset = MINS[1]\n data[:, :, 1] -= height_offset\n trajec = data[:, 0, [0, 2]]\n\n data[..., 0] -= data[:, 0:1, 0]\n data[..., 2] -= data[:, 0:1, 2]\n\n # print(trajec.shape)\n\n def update(index):\n # print(index)\n ax.lines = []\n ax.collections = []\n ax.view_init(elev=120, azim=-90)\n ax.dist = 7.5\n # ax =\n plot_xzPlane(MINS[0] - trajec[index, 0], MAXS[0] - trajec[index, 0], 0, MINS[2] - trajec[index, 1],\n MAXS[2] - trajec[index, 1])\n # ax.scatter(data[index, :22, 0], data[index, :22, 1], data[index, :22, 2], color='black', s=3)\n\n if index > 1:\n ax.plot3D(trajec[:index, 0] - trajec[index, 0], np.zeros_like(trajec[:index, 0]),\n trajec[:index, 1] - trajec[index, 1], linewidth=1.0,\n color='blue')\n # ax = plot_xzPlane(ax, MINS[0], MAXS[0], 0, MINS[2], MAXS[2])\n\n for i, (chain, color) in enumerate(zip(kinematic_tree, colors)):\n # print(color)\n if i < 5:\n linewidth = 4.0\n else:\n linewidth = 2.0\n ax.plot3D(data[index, chain, 0], data[index, chain, 1], data[index, chain, 2], linewidth=linewidth,\n color=color)\n # print(trajec[:index, 0].shape)\n\n plt.axis('off')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_zticklabels([])\n\n ani = FuncAnimation(fig, update, frames=frame_number, interval=1000 / fps, repeat=False)\n\n # writer = FFMpegFileWriter(fps=fps)\n ani.save(save_path, fps=fps)\n plt.close()" }, { "identifier": "t2m_kinematic_chain", "path": "utils/paramUtil.py", "snippet": "" } ]
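The `output_project` method in the ResidualTransformer snippet above selects one `(ntoken, code_dim)` projection matrix per batch element and applies it over the sequence dimension with `torch.einsum('bnc, bcs->bns', ...)`. The block below is a standalone shape check of that contraction; all sizes are arbitrary example values, not taken from any model config. As an aside, the snippet's `output += output + output_proj_bias.unsqueeze(-1)` line appears to add `output` twice, and a plain `output = output + bias` is presumably what was intended.

```python
# Standalone shape check for the batched projection used in `output_project`
# above; the dimensions are invented example values.
import torch

bs, ntoken, code_dim, seqlen = 2, 5, 4, 7
proj_weight = torch.randn(bs, ntoken, code_dim)   # one projection matrix per sample (picked via qids)
logits = torch.randn(bs, code_dim, seqlen)        # transformer features, channels-first over the sequence

output = torch.einsum('bnc, bcs->bns', proj_weight, logits)
print(output.shape)  # torch.Size([2, 5, 7]) -> (bs, ntoken, seqlen)
```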
import os
import torch
import torch.nn.functional as F
import numpy as np
from os.path import join as pjoin
from models.mask_transformer.transformer import MaskTransformer, ResidualTransformer
from models.vq.model import RVQVAE, LengthEstimator
from options.eval_option import EvalT2MOptions
from utils.get_opt import get_opt
from utils.fixseed import fixseed
from visualization.joints2bvh import Joint2BVHConvertor
from torch.distributions.categorical import Categorical
from utils.motion_process import recover_from_ric
from utils.plot_script import plot_3d_motion
from utils.paramUtil import t2m_kinematic_chain
16,497
clip_version = 'ViT-B/32' def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, vq_opt.dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location='cpu') model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location='cpu') model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt, vq_opt, opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer def load_len_estimator(opt): model = LengthEstimator(512, 50) ckpt = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'length_estimator', 'model', 'finest.tar'), map_location=opt.device) model.load_state_dict(ckpt['estimator']) print(f'Loading Length Estimator from epoch {ckpt["epoch"]}!') return model if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse()
clip_version = 'ViT-B/32' def load_vq_model(vq_opt): # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_model = RVQVAE(vq_opt, vq_opt.dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location='cpu') model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {vq_opt.name} Completed!') return vq_model, vq_opt def load_trans_model(model_opt, opt, which_model): t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim, cond_mode='text', latent_dim=model_opt.latent_dim, ff_size=model_opt.ff_size, num_layers=model_opt.n_layers, num_heads=model_opt.n_heads, dropout=model_opt.dropout, clip_dim=512, cond_drop_prob=model_opt.cond_drop_prob, clip_version=clip_version, opt=model_opt) ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model), map_location='cpu') model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans' # print(ckpt.keys()) missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Transformer {opt.name} from epoch {ckpt["ep"]}!') return t2m_transformer def load_res_model(res_opt, vq_opt, opt): res_opt.num_quantizers = vq_opt.num_quantizers res_opt.num_tokens = vq_opt.nb_code res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=res_opt.latent_dim, ff_size=res_opt.ff_size, num_layers=res_opt.n_layers, num_heads=res_opt.n_heads, dropout=res_opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=res_opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=res_opt.share_weight, clip_version=clip_version, opt=res_opt) ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!') return res_transformer def load_len_estimator(opt): model = LengthEstimator(512, 50) ckpt = torch.load(pjoin(opt.checkpoints_dir, opt.dataset_name, 'length_estimator', 'model', 'finest.tar'), map_location=opt.device) model.load_state_dict(ckpt['estimator']) print(f'Loading Length Estimator from epoch {ckpt["epoch"]}!') return model if __name__ == '__main__': parser = EvalT2MOptions() opt = parser.parse()
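For orientation, the sketch below strings the loader helpers above together with the `generate` and `forward_decoder` methods shown in the context snippets. It is a hedged usage sketch, not this file's actual main block: `vq_model`, `vq_opt` and `res_model` are assumed to come from `load_vq_model` and `load_res_model`, while the prompt, the motion length, the downsampling factor of 4 and `cond_scale=5` are placeholders invented for the example.

```python
# Hedged usage sketch (assumes vq_model / vq_opt / res_model were returned by the
# loaders above; the prompt, lengths and base-layer token ids are invented stand-ins).
import torch

texts = ["a person walks forward and waves"]                 # example prompt
m_lens = torch.LongTensor([96])                              # motion length in frames (assumed)
token_lens = m_lens // 4                                     # assumed temporal downsampling of the VQ encoder
mids = torch.randint(0, vq_opt.nb_code, (1, int(token_lens[0])))  # stand-in for base-layer tokens

with torch.no_grad():
    # refine the base tokens layer by layer with the residual transformer
    all_ids = res_model.generate(mids, texts, token_lens, temperature=1, cond_scale=5)
    # sum the selected codebook entries across layers and decode back to motion features
    pred_motions = vq_model.forward_decoder(all_ids)
# pred_motions is a decoded motion feature tensor; its exact layout follows the
# Decoder definition, which is not reproduced in this record.
```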
fixseed(opt.seed)
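The single line above is the gold continuation of the cropped code for this sample, and the index that follows presumably points at the seeding helper in the context list. Below is a minimal, self-contained sketch of how such a record could be scored; the dict and the stubbed model call are illustrative assumptions, not part of any dataset tooling.

```python
# Minimal scoring sketch: `predict_next_line` is a stub standing in for a real
# code model, and the dict simply mirrors the values of this sample.
def predict_next_line(prefix: str) -> str:
    return "fixseed(opt.seed)"                   # a perfect guess, for illustration only

sample = {
    "prefix_last_line": "opt = parser.parse()",  # where the cropped code above ends
    "gold_next_line": "fixseed(opt.seed)",       # gold continuation of this sample
    "gold_snippet_index": 6,                     # context snippet the gold line relies on
}

prediction = predict_next_line(sample["prefix_last_line"])
print(prediction.strip() == sample["gold_next_line"].strip())   # True
```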
6
2023-11-29 19:21:27+00:00
24k
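Several context snippets in the record above (`recover_from_ric`, `Joint2BVHConvertor`, `plot_3d_motion`, `t2m_kinematic_chain`) form a post-processing chain from generated feature vectors to a BVH file and a rendered video. The sketch below strings them together; the random `pred_motion` tensor, the file names, the title and the fps value are assumptions made only to keep the example self-contained, and a real motion would come from the decoder shown earlier.

```python
# Hedged post-processing sketch: a random (T, 263) tensor stands in for a real
# de-normalized HumanML3D feature sequence produced by the decoder.
import torch
from utils.motion_process import recover_from_ric
from utils.paramUtil import t2m_kinematic_chain
from utils.plot_script import plot_3d_motion
from visualization.joints2bvh import Joint2BVHConvertor

pred_motion = torch.randn(120, 263)                            # placeholder motion features
joints = recover_from_ric(pred_motion.float(), 22).numpy()     # (T, 22, 3) joint positions

converter = Joint2BVHConvertor()
_, ik_joints = converter.convert(joints, filename="sample.bvh", iterations=10, foot_ik=True)

plot_3d_motion("sample.mp4", t2m_kinematic_chain, ik_joints,
               title="a person walks forward", fps=20)
```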
dvlab-research/LLMGA
llmga/diffusers/src/diffusers/models/transformer_temporal.py
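The context that follows for this record includes a snippet for `BaseOutput`, the dict/tuple hybrid container that model outputs in this file build on (see the BaseOutput snippet below). As a quick illustration of that behaviour, here is a hedged, self-contained sketch; the toy class and its `frames` field are invented for the example and it assumes the stock `diffusers` package is installed.

```python
# Toy illustration of the BaseOutput behaviour described in the snippet below;
# the class name and the `frames` field are invented, not this file's real output class.
import torch
from dataclasses import dataclass
from diffusers.utils import BaseOutput

@dataclass
class ToyTemporalOutput(BaseOutput):
    frames: torch.FloatTensor = None

out = ToyTemporalOutput(frames=torch.zeros(1, 4, 3, 8, 8))
print(out.frames.shape)        # attribute access
print(out["frames"].shape)     # dict-style access
print(out[0].shape)            # tuple-style access (None fields are skipped)
print(out.to_tuple()[0].shape)
```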
[ { "identifier": "ConfigMixin", "path": "llmga/diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. 
Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "register_to_config", "path": "llmga/diffusers/src/diffusers/configuration_utils.py", "snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)" }, { "identifier": "BaseOutput", "path": 
"llmga/diffusers/src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n Python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n first.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def __reduce__(self):\n if not is_dataclass(self):\n return super().__reduce__()\n callable, _args, *remaining = super().__reduce__()\n args = tuple(getattr(self, field.name) for field in fields(self))\n return callable, args, *remaining\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" }, { "identifier": "BasicTransformerBlock", "path": "llmga/diffusers/src/diffusers/models/attention.py", "snippet": "class BasicTransformerBlock(nn.Module):\n r\"\"\"\n A basic Transformer block.\n\n Parameters:\n dim (`int`): The number of channels in the input and output.\n num_attention_heads (`int`): The number of heads to use for multi-head attention.\n attention_head_dim (`int`): The number of channels in each head.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to be used in feed-forward.\n num_embeds_ada_norm (:\n obj: `int`, *optional*): The number of diffusion steps used during training. 
See `Transformer2DModel`.\n attention_bias (:\n obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.\n only_cross_attention (`bool`, *optional*):\n Whether to use only cross-attention layers. In this case two cross attention layers are used.\n double_self_attention (`bool`, *optional*):\n Whether to use two self-attention layers. In this case no cross attention layers are used.\n upcast_attention (`bool`, *optional*):\n Whether to upcast the attention computation to float32. This is useful for mixed precision training.\n norm_elementwise_affine (`bool`, *optional*, defaults to `True`):\n Whether to use learnable elementwise affine parameters for normalization.\n norm_type (`str`, *optional*, defaults to `\"layer_norm\"`):\n The normalization layer to use. Can be `\"layer_norm\"`, `\"ada_norm\"` or `\"ada_norm_zero\"`.\n final_dropout (`bool` *optional*, defaults to False):\n Whether to apply a final dropout after the last feed-forward layer.\n attention_type (`str`, *optional*, defaults to `\"default\"`):\n The type of attention to use. Can be `\"default\"` or `\"gated\"` or `\"gated-text-image\"`.\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n num_attention_heads: int,\n attention_head_dim: int,\n dropout=0.0,\n cross_attention_dim: Optional[int] = None,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n attention_bias: bool = False,\n only_cross_attention: bool = False,\n double_self_attention: bool = False,\n upcast_attention: bool = False,\n norm_elementwise_affine: bool = True,\n norm_type: str = \"layer_norm\",\n final_dropout: bool = False,\n attention_type: str = \"default\",\n ):\n super().__init__()\n self.only_cross_attention = only_cross_attention\n\n self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == \"ada_norm_zero\"\n self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == \"ada_norm\"\n\n if norm_type in (\"ada_norm\", \"ada_norm_zero\") and num_embeds_ada_norm is None:\n raise ValueError(\n f\"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to\"\n f\" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.\"\n )\n\n # Define 3 blocks. Each block has its own normalization layer.\n # 1. Self-Attn\n if self.use_ada_layer_norm:\n self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)\n elif self.use_ada_layer_norm_zero:\n self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)\n else:\n self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n self.attn1 = Attention(\n query_dim=dim,\n heads=num_attention_heads,\n dim_head=attention_head_dim,\n dropout=dropout,\n bias=attention_bias,\n cross_attention_dim=cross_attention_dim if only_cross_attention else None,\n upcast_attention=upcast_attention,\n )\n\n # 2. Cross-Attn\n if cross_attention_dim is not None or double_self_attention:\n # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.\n # I.e. 
the number of returned modulation chunks from AdaLayerZero would not make sense if returned during\n # the second cross attention block.\n self.norm2 = (\n AdaLayerNorm(dim, num_embeds_ada_norm)\n if self.use_ada_layer_norm\n else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n )\n self.attn2 = Attention(\n query_dim=dim,\n cross_attention_dim=cross_attention_dim if not double_self_attention else None,\n heads=num_attention_heads,\n dim_head=attention_head_dim,\n dropout=dropout,\n bias=attention_bias,\n upcast_attention=upcast_attention,\n ) # is self-attn if encoder_hidden_states is none\n else:\n self.norm2 = None\n self.attn2 = None\n\n # 3. Feed-forward\n self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)\n\n # 4. Fuser\n if attention_type == \"gated\" or attention_type == \"gated-text-image\":\n self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)\n\n # let chunk size default to None\n self._chunk_size = None\n self._chunk_dim = 0\n\n def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):\n # Sets chunk feed-forward\n self._chunk_size = chunk_size\n self._chunk_dim = dim\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n ) -> torch.FloatTensor:\n # Notice that normalization is always applied before the real computation in the following blocks.\n # 0. Self-Attention\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. Retrieve lora scale.\n lora_scale = cross_attention_kwargs.get(\"scale\", 1.0) if cross_attention_kwargs is not None else 1.0\n\n # 2. Prepare GLIGEN inputs\n cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}\n gligen_kwargs = cross_attention_kwargs.pop(\"gligen\", None)\n\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n # 2.5 GLIGEN Control\n if gligen_kwargs is not None:\n hidden_states = self.fuser(hidden_states, gligen_kwargs[\"objs\"])\n # 2.5 ends\n\n # 3. Cross-Attention\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 4. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n if self._chunk_size is not None:\n # \"feed_forward_chunk_size\" can be used to save memory\n if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:\n raise ValueError(\n f\"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.\"\n )\n\n num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size\n ff_output = torch.cat(\n [\n self.ff(hid_slice, scale=lora_scale)\n for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)\n ],\n dim=self._chunk_dim,\n )\n else:\n ff_output = self.ff(norm_hidden_states, scale=lora_scale)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states" }, { "identifier": "ModelMixin", "path": "llmga/diffusers/src/diffusers/models/modeling_utils.py", "snippet": "class ModelMixin(torch.nn.Module, PushToHubMixin):\n r\"\"\"\n Base class for all models.\n\n [`ModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and\n saving models.\n\n - **config_name** ([`str`]) -- Filename to save a model to when calling [`~models.ModelMixin.save_pretrained`].\n \"\"\"\n config_name = CONFIG_NAME\n _automatically_saved_args = [\"_diffusers_version\", \"_class_name\", \"_name_or_path\"]\n _supports_gradient_checkpointing = False\n _keys_to_ignore_on_load_unexpected = None\n _hf_peft_config_loaded = False\n\n def __init__(self):\n super().__init__()\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 We need to overwrite\n __getattr__ here in addition so that we don't trigger `torch.nn.Module`'s __getattr__':\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 
'unet.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False, stacklevel=3)\n return self._internal_dict[name]\n\n # call PyTorch's https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n return super().__getattr__(name)\n\n @property\n def is_gradient_checkpointing(self) -> bool:\n \"\"\"\n Whether gradient checkpointing is activated for this model or not.\n \"\"\"\n return any(hasattr(m, \"gradient_checkpointing\") and m.gradient_checkpointing for m in self.modules())\n\n def enable_gradient_checkpointing(self):\n \"\"\"\n Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or\n *checkpoint activations* in other frameworks).\n \"\"\"\n if not self._supports_gradient_checkpointing:\n raise ValueError(f\"{self.__class__.__name__} does not support gradient checkpointing.\")\n self.apply(partial(self._set_gradient_checkpointing, value=True))\n\n def disable_gradient_checkpointing(self):\n \"\"\"\n Deactivates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or\n *checkpoint activations* in other frameworks).\n \"\"\"\n if self._supports_gradient_checkpointing:\n self.apply(partial(self._set_gradient_checkpointing, value=False))\n\n def set_use_memory_efficient_attention_xformers(\n self, valid: bool, attention_op: Optional[Callable] = None\n ) -> None:\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid, attention_op)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n for module in self.children():\n if isinstance(module, torch.nn.Module):\n fn_recursive_set_mem_eff(module)\n\n def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):\n r\"\"\"\n Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).\n\n When this option is enabled, you should observe lower GPU memory usage and a potential speed up during\n inference. Speed up during training is not guaranteed.\n\n <Tip warning={true}>\n\n ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes\n precedent.\n\n </Tip>\n\n Parameters:\n attention_op (`Callable`, *optional*):\n Override the default `None` operator for use as `op` argument to the\n [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)\n function of xFormers.\n\n Examples:\n\n ```py\n >>> import torch\n >>> from diffusers import UNet2DConditionModel\n >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp\n\n >>> model = UNet2DConditionModel.from_pretrained(\n ... \"stabilityai/stable-diffusion-2-1\", subfolder=\"unet\", torch_dtype=torch.float16\n ... 
)\n >>> model = model.to(\"cuda\")\n >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)\n ```\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(True, attention_op)\n\n def disable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(False)\n\n def add_adapter(self, adapter_config, adapter_name: str = \"default\") -> None:\n r\"\"\"\n Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned\n to the adapter to follow the convention of the PEFT library.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT\n [documentation](https://huggingface.co/docs/peft).\n\n Args:\n adapter_config (`[~peft.PeftConfig]`):\n The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt\n methods.\n adapter_name (`str`, *optional*, defaults to `\"default\"`):\n The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n from peft import PeftConfig, inject_adapter_in_model\n\n if not self._hf_peft_config_loaded:\n self._hf_peft_config_loaded = True\n elif adapter_name in self.peft_config:\n raise ValueError(f\"Adapter with name {adapter_name} already exists. Please use a different name.\")\n\n if not isinstance(adapter_config, PeftConfig):\n raise ValueError(\n f\"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.\"\n )\n\n # Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is\n # handled by the `load_lora_layers` or `LoraLoaderMixin`. Therefore we set it to `None` here.\n adapter_config.base_model_name_or_path = None\n inject_adapter_in_model(adapter_config, self, adapter_name)\n self.set_adapter(adapter_name)\n\n def set_adapter(self, adapter_name: Union[str, List[str]]) -> None:\n \"\"\"\n Sets a specific adapter by forcing the model to only use that adapter and disables the other adapters.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n\n Args:\n adapter_name (Union[str, List[str]])):\n The list of adapters to set or the adapter name in case of single adapter.\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. Please load an adapter first.\")\n\n if isinstance(adapter_name, str):\n adapter_name = [adapter_name]\n\n missing = set(adapter_name) - set(self.peft_config)\n if len(missing) > 0:\n raise ValueError(\n f\"Following adapter(s) could not be found: {', '.join(missing)}. 
Make sure you are passing the correct adapter name(s).\"\n f\" current loaded adapters are: {list(self.peft_config.keys())}\"\n )\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n _adapters_has_been_set = False\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n if hasattr(module, \"set_adapter\"):\n module.set_adapter(adapter_name)\n # Previous versions of PEFT does not support multi-adapter inference\n elif not hasattr(module, \"set_adapter\") and len(adapter_name) != 1:\n raise ValueError(\n \"You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT.\"\n \" `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`\"\n )\n else:\n module.active_adapter = adapter_name\n _adapters_has_been_set = True\n\n if not _adapters_has_been_set:\n raise ValueError(\n \"Did not succeeded in setting the adapter. Please make sure you are using a model that supports adapters.\"\n )\n\n def disable_adapters(self) -> None:\n r\"\"\"\n Disable all adapters attached to the model and fallback to inference with the base model only.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. Please load an adapter first.\")\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n if hasattr(module, \"enable_adapters\"):\n module.enable_adapters(enabled=False)\n else:\n # support for older PEFT versions\n module.disable_adapters = True\n\n def enable_adapters(self) -> None:\n \"\"\"\n Enable adapters that are attached to the model. The model will use `self.active_adapters()` to retrieve the\n list of adapters to enable.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. Please load an adapter first.\")\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n if hasattr(module, \"enable_adapters\"):\n module.enable_adapters(enabled=True)\n else:\n # support for older PEFT versions\n module.disable_adapters = False\n\n def active_adapters(self) -> List[str]:\n \"\"\"\n Gets the current list of active adapters of the model.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. 
Please load an adapter first.\")\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n return module.active_adapter\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n is_main_process: bool = True,\n save_function: Callable = None,\n safe_serialization: bool = True,\n variant: Optional[str] = None,\n push_to_hub: bool = False,\n **kwargs,\n ):\n \"\"\"\n Save a model and its configuration file to a directory so that it can be reloaded using the\n [`~models.ModelMixin.from_pretrained`] class method.\n\n Arguments:\n save_directory (`str` or `os.PathLike`):\n Directory to save a model and its configuration file to. Will be created if it doesn't exist.\n is_main_process (`bool`, *optional*, defaults to `True`):\n Whether the process calling this is the main process or not. Useful during distributed training and you\n need to call this function on all processes. In this case, set `is_main_process=True` only on the main\n process to avoid race conditions.\n save_function (`Callable`):\n The function to use to save the state dictionary. Useful during distributed training when you need to\n replace `torch.save` with another method. Can be configured with the environment variable\n `DIFFUSERS_SAVE_MODE`.\n safe_serialization (`bool`, *optional*, defaults to `True`):\n Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.\n variant (`str`, *optional*):\n If specified, weights are saved in the format `pytorch_model.<variant>.bin`.\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n logger.error(f\"Provided path ({save_directory}) should be a directory, not a file\")\n return\n\n os.makedirs(save_directory, exist_ok=True)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n # Only save the model itself if we are using distributed training\n model_to_save = self\n\n # Attach architecture to the config\n # Save the config\n if is_main_process:\n model_to_save.save_config(save_directory)\n\n # Save the model\n state_dict = model_to_save.state_dict()\n\n weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME\n weights_name = _add_variant(weights_name, variant)\n\n # Save the model\n if safe_serialization:\n safetensors.torch.save_file(\n state_dict, os.path.join(save_directory, weights_name), metadata={\"format\": \"pt\"}\n )\n else:\n torch.save(state_dict, os.path.join(save_directory, weights_name))\n\n logger.info(f\"Model weights saved in {os.path.join(save_directory, weights_name)}\")\n\n if push_to_hub:\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: 
Optional[Union[str, os.PathLike]], **kwargs):\n r\"\"\"\n Instantiate a pretrained PyTorch model from a pretrained model configuration.\n\n The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To\n train the model, set it back in training mode with `model.train()`.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved\n with [`~ModelMixin.save_pretrained`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n torch_dtype (`str` or `torch.dtype`, *optional*):\n Override the default `torch.dtype` and load the model with another dtype. If `\"auto\"` is passed, the\n dtype is automatically derived from the model's weights.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info (`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n from_flax (`bool`, *optional*, defaults to `False`):\n Load the model weights from a Flax checkpoint save file.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n mirror (`str`, *optional*):\n Mirror source to resolve accessibility issues if you're downloading a model in China. We do not\n guarantee the timeliness or safety of the source, and you should refer to the mirror site for more\n information.\n device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):\n A map that specifies where each submodule should go. It doesn't need to be defined for each\n parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the\n same device.\n\n Set `device_map=\"auto\"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For\n more information about each option see [designing a device\n map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).\n max_memory (`Dict`, *optional*):\n A dictionary device identifier for the maximum memory. Will default to the maximum memory available for\n each GPU and the available CPU RAM if unset.\n offload_folder (`str` or `os.PathLike`, *optional*):\n The path to offload weights if `device_map` contains the value `\"disk\"`.\n offload_state_dict (`bool`, *optional*):\n If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if\n the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`\n when there is some disk offload.\n low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):\n Speed up model loading only loading the pretrained weights and not initializing the weights. This also\n tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.\n Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this\n argument to `True` will raise an error.\n variant (`str`, *optional*):\n Load weights from a specified `variant` filename such as `\"fp16\"` or `\"ema\"`. This is ignored when\n loading `from_flax`.\n use_safetensors (`bool`, *optional*, defaults to `None`):\n If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the\n `safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors`\n weights. If set to `False`, `safetensors` weights are not loaded.\n\n <Tip>\n\n To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with\n `huggingface-cli login`. 
You can also activate the special\n [\"offline-mode\"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a\n firewalled environment.\n\n </Tip>\n\n Example:\n\n ```py\n from diffusers import UNet2DConditionModel\n\n unet = UNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\", subfolder=\"unet\")\n ```\n\n If you get the error message below, you need to finetune the weights for your downstream task:\n\n ```bash\n Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:\n - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated\n You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n ```\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n ignore_mismatched_sizes = kwargs.pop(\"ignore_mismatched_sizes\", False)\n force_download = kwargs.pop(\"force_download\", False)\n from_flax = kwargs.pop(\"from_flax\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n local_files_only = kwargs.pop(\"local_files_only\", HF_HUB_OFFLINE)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n device_map = kwargs.pop(\"device_map\", None)\n max_memory = kwargs.pop(\"max_memory\", None)\n offload_folder = kwargs.pop(\"offload_folder\", None)\n offload_state_dict = kwargs.pop(\"offload_state_dict\", False)\n low_cpu_mem_usage = kwargs.pop(\"low_cpu_mem_usage\", _LOW_CPU_MEM_USAGE_DEFAULT)\n variant = kwargs.pop(\"variant\", None)\n use_safetensors = kwargs.pop(\"use_safetensors\", None)\n\n allow_pickle = False\n if use_safetensors is None:\n use_safetensors = True\n allow_pickle = True\n\n if low_cpu_mem_usage and not is_accelerate_available():\n low_cpu_mem_usage = False\n logger.warning(\n \"Cannot initialize model with low cpu memory usage because `accelerate` was not found in the\"\n \" environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install\"\n \" `accelerate` for faster and less memory-intense model loading. You can do so with: \\n```\\npip\"\n \" install accelerate\\n```\\n.\"\n )\n\n if device_map is not None and not is_accelerate_available():\n raise NotImplementedError(\n \"Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set\"\n \" `device_map=None`. You can install accelerate with `pip install accelerate`.\"\n )\n\n # Check if we can handle device_map and dispatching the weights\n if device_map is not None and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set\"\n \" `device_map=None`.\"\n )\n\n if low_cpu_mem_usage is True and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Low memory initialization requires torch >= 1.9.0. 
Please either update your PyTorch version or set\"\n \" `low_cpu_mem_usage=False`.\"\n )\n\n if low_cpu_mem_usage is False and device_map is not None:\n raise ValueError(\n f\"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and\"\n \" dispatching. Please make sure to set `low_cpu_mem_usage=True`.\"\n )\n\n # Load config if we don't provide a configuration\n config_path = pretrained_model_name_or_path\n\n user_agent = {\n \"diffusers\": __version__,\n \"file_type\": \"model\",\n \"framework\": \"pytorch\",\n }\n\n # load config\n config, unused_kwargs, commit_hash = cls.load_config(\n config_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n return_commit_hash=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n device_map=device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n user_agent=user_agent,\n **kwargs,\n )\n\n # load model\n model_file = None\n if from_flax:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=FLAX_WEIGHTS_NAME,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n model = cls.from_config(config, **unused_kwargs)\n\n # Convert the weights\n from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model\n\n model = load_flax_checkpoint_in_pytorch_model(model, model_file)\n else:\n if use_safetensors:\n try:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant),\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n except IOError as e:\n if not allow_pickle:\n raise e\n pass\n if model_file is None:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=_add_variant(WEIGHTS_NAME, variant),\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n\n if low_cpu_mem_usage:\n # Instantiate model with empty weights\n with accelerate.init_empty_weights():\n model = cls.from_config(config, **unused_kwargs)\n\n # if device_map is None, load the state dict and move the params from meta device to the cpu\n if device_map is None:\n param_device = \"cpu\"\n state_dict = load_state_dict(model_file, variant=variant)\n model._convert_deprecated_attention_blocks(state_dict)\n # move the params from meta device to cpu\n missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())\n if len(missing_keys) > 0:\n raise ValueError(\n f\"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are\"\n f\" missing: \\n {', '.join(missing_keys)}. 
\\n Please make sure to pass\"\n \" `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize\"\n \" those weights or else make sure your checkpoint file is correct.\"\n )\n\n unexpected_keys = load_model_dict_into_meta(\n model,\n state_dict,\n device=param_device,\n dtype=torch_dtype,\n model_name_or_path=pretrained_model_name_or_path,\n )\n\n if cls._keys_to_ignore_on_load_unexpected is not None:\n for pat in cls._keys_to_ignore_on_load_unexpected:\n unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]\n\n if len(unexpected_keys) > 0:\n logger.warn(\n f\"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \\n {[', '.join(unexpected_keys)]}\"\n )\n\n else: # else let accelerate handle loading and dispatching.\n # Load weights and dispatch according to the device_map\n # by default the device_map is None and the weights are loaded on the CPU\n try:\n accelerate.load_checkpoint_and_dispatch(\n model,\n model_file,\n device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n dtype=torch_dtype,\n )\n except AttributeError as e:\n # When using accelerate loading, we do not have the ability to load the state\n # dict and rename the weight names manually. Additionally, accelerate skips\n # torch loading conventions and directly writes into `module.{_buffers, _parameters}`\n # (which look like they should be private variables?), so we can't use the standard hooks\n # to rename parameters on load. We need to mimic the original weight names so the correct\n # attributes are available. After we have loaded the weights, we convert the deprecated\n # names to the new non-deprecated names. Then we _greatly encourage_ the user to convert\n # the weights so we don't have to do this again.\n\n if \"'Attention' object has no attribute\" in str(e):\n logger.warn(\n f\"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}\"\n \" was saved with deprecated attention block weight names. We will load it with the deprecated attention block\"\n \" names and convert them on the fly to the new attention block format. Please re-save the model after this conversion,\"\n \" so we don't have to do the on the fly renaming in the future. 
If the model is from a hub checkpoint,\"\n \" please also re-upload it or open a PR on the original repository.\"\n )\n model._temp_convert_self_to_deprecated_attention_blocks()\n accelerate.load_checkpoint_and_dispatch(\n model,\n model_file,\n device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n dtype=torch_dtype,\n )\n model._undo_temp_convert_self_to_deprecated_attention_blocks()\n else:\n raise e\n\n loading_info = {\n \"missing_keys\": [],\n \"unexpected_keys\": [],\n \"mismatched_keys\": [],\n \"error_msgs\": [],\n }\n else:\n model = cls.from_config(config, **unused_kwargs)\n\n state_dict = load_state_dict(model_file, variant=variant)\n model._convert_deprecated_attention_blocks(state_dict)\n\n model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(\n model,\n state_dict,\n model_file,\n pretrained_model_name_or_path,\n ignore_mismatched_sizes=ignore_mismatched_sizes,\n )\n\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"mismatched_keys\": mismatched_keys,\n \"error_msgs\": error_msgs,\n }\n\n if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):\n raise ValueError(\n f\"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}.\"\n )\n elif torch_dtype is not None:\n model = model.to(torch_dtype)\n\n model.register_to_config(_name_or_path=pretrained_model_name_or_path)\n\n # Set model in evaluation mode to deactivate DropOut modules by default\n model.eval()\n if output_loading_info:\n return model, loading_info\n\n return model\n\n @classmethod\n def _load_pretrained_model(\n cls,\n model,\n state_dict,\n resolved_archive_file,\n pretrained_model_name_or_path,\n ignore_mismatched_sizes=False,\n ):\n # Retrieve missing & unexpected_keys\n model_state_dict = model.state_dict()\n loaded_keys = list(state_dict.keys())\n\n expected_keys = list(model_state_dict.keys())\n\n original_loaded_keys = loaded_keys\n\n missing_keys = list(set(expected_keys) - set(loaded_keys))\n unexpected_keys = list(set(loaded_keys) - set(expected_keys))\n\n # Make sure we are able to load base models as well as derived models (with heads)\n model_to_load = model\n\n def _find_mismatched_keys(\n state_dict,\n model_state_dict,\n loaded_keys,\n ignore_mismatched_sizes,\n ):\n mismatched_keys = []\n if ignore_mismatched_sizes:\n for checkpoint_key in loaded_keys:\n model_key = checkpoint_key\n\n if (\n model_key in model_state_dict\n and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape\n ):\n mismatched_keys.append(\n (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)\n )\n del state_dict[checkpoint_key]\n return mismatched_keys\n\n if state_dict is not None:\n # Whole checkpoint\n mismatched_keys = _find_mismatched_keys(\n state_dict,\n model_state_dict,\n original_loaded_keys,\n ignore_mismatched_sizes,\n )\n error_msgs = _load_state_dict_into_model(model_to_load, state_dict)\n\n if len(error_msgs) > 0:\n error_msg = \"\\n\\t\".join(error_msgs)\n if \"size mismatch\" in error_msg:\n error_msg += (\n \"\\n\\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.\"\n )\n raise RuntimeError(f\"Error(s) in loading state_dict for {model.__class__.__name__}:\\n\\t{error_msg}\")\n\n if len(unexpected_keys) > 0:\n logger.warning(\n f\"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used 
when\"\n f\" initializing {model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are\"\n f\" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task\"\n \" or with another architecture (e.g. initializing a BertForSequenceClassification model from a\"\n \" BertForPreTraining model).\\n- This IS NOT expected if you are initializing\"\n f\" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly\"\n \" identical (initializing a BertForSequenceClassification model from a\"\n \" BertForSequenceClassification model).\"\n )\n else:\n logger.info(f\"All model checkpoint weights were used when initializing {model.__class__.__name__}.\\n\")\n if len(missing_keys) > 0:\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\\nYou should probably\"\n \" TRAIN this model on a down-stream task to be able to use it for predictions and inference.\"\n )\n elif len(mismatched_keys) == 0:\n logger.info(\n f\"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path}.\\nIf your task is similar to the task the model of the\"\n f\" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions\"\n \" without further training.\"\n )\n if len(mismatched_keys) > 0:\n mismatched_warning = \"\\n\".join(\n [\n f\"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated\"\n for key, shape1, shape2 in mismatched_keys\n ]\n )\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized because the shapes did not\"\n f\" match:\\n{mismatched_warning}\\nYou should probably TRAIN this model on a down-stream task to be\"\n \" able to use it for predictions and inference.\"\n )\n\n return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs\n\n @property\n def device(self) -> device:\n \"\"\"\n `torch.device`: The device on which the module is (assuming that all the module parameters are on the same\n device).\n \"\"\"\n return get_parameter_device(self)\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).\n \"\"\"\n return get_parameter_dtype(self)\n\n def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:\n \"\"\"\n Get number of (trainable or non-embedding) parameters in the module.\n\n Args:\n only_trainable (`bool`, *optional*, defaults to `False`):\n Whether or not to return only the number of trainable parameters.\n exclude_embeddings (`bool`, *optional*, defaults to `False`):\n Whether or not to return only the number of non-embedding parameters.\n\n Returns:\n `int`: The number of parameters.\n\n Example:\n\n ```py\n from diffusers import UNet2DConditionModel\n\n model_id = \"runwayml/stable-diffusion-v1-5\"\n unet = UNet2DConditionModel.from_pretrained(model_id, subfolder=\"unet\")\n unet.num_parameters(only_trainable=True)\n 859520964\n ```\n \"\"\"\n\n if exclude_embeddings:\n embedding_param_names = [\n f\"{name}.weight\"\n for name, module_type in self.named_modules()\n if isinstance(module_type, torch.nn.Embedding)\n ]\n non_embedding_parameters = [\n parameter for name, 
parameter in self.named_parameters() if name not in embedding_param_names\n ]\n return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)\n else:\n return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)\n\n def _convert_deprecated_attention_blocks(self, state_dict):\n deprecated_attention_block_paths = []\n\n def recursive_find_attn_block(name, module):\n if hasattr(module, \"_from_deprecated_attn_block\") and module._from_deprecated_attn_block:\n deprecated_attention_block_paths.append(name)\n\n for sub_name, sub_module in module.named_children():\n sub_name = sub_name if name == \"\" else f\"{name}.{sub_name}\"\n recursive_find_attn_block(sub_name, sub_module)\n\n recursive_find_attn_block(\"\", self)\n\n # NOTE: we have to check if the deprecated parameters are in the state dict\n # because it is possible we are loading from a state dict that was already\n # converted\n\n for path in deprecated_attention_block_paths:\n # group_norm path stays the same\n\n # query -> to_q\n if f\"{path}.query.weight\" in state_dict:\n state_dict[f\"{path}.to_q.weight\"] = state_dict.pop(f\"{path}.query.weight\")\n if f\"{path}.query.bias\" in state_dict:\n state_dict[f\"{path}.to_q.bias\"] = state_dict.pop(f\"{path}.query.bias\")\n\n # key -> to_k\n if f\"{path}.key.weight\" in state_dict:\n state_dict[f\"{path}.to_k.weight\"] = state_dict.pop(f\"{path}.key.weight\")\n if f\"{path}.key.bias\" in state_dict:\n state_dict[f\"{path}.to_k.bias\"] = state_dict.pop(f\"{path}.key.bias\")\n\n # value -> to_v\n if f\"{path}.value.weight\" in state_dict:\n state_dict[f\"{path}.to_v.weight\"] = state_dict.pop(f\"{path}.value.weight\")\n if f\"{path}.value.bias\" in state_dict:\n state_dict[f\"{path}.to_v.bias\"] = state_dict.pop(f\"{path}.value.bias\")\n\n # proj_attn -> to_out.0\n if f\"{path}.proj_attn.weight\" in state_dict:\n state_dict[f\"{path}.to_out.0.weight\"] = state_dict.pop(f\"{path}.proj_attn.weight\")\n if f\"{path}.proj_attn.bias\" in state_dict:\n state_dict[f\"{path}.to_out.0.bias\"] = state_dict.pop(f\"{path}.proj_attn.bias\")\n\n def _temp_convert_self_to_deprecated_attention_blocks(self):\n deprecated_attention_block_modules = []\n\n def recursive_find_attn_block(module):\n if hasattr(module, \"_from_deprecated_attn_block\") and module._from_deprecated_attn_block:\n deprecated_attention_block_modules.append(module)\n\n for sub_module in module.children():\n recursive_find_attn_block(sub_module)\n\n recursive_find_attn_block(self)\n\n for module in deprecated_attention_block_modules:\n module.query = module.to_q\n module.key = module.to_k\n module.value = module.to_v\n module.proj_attn = module.to_out[0]\n\n # We don't _have_ to delete the old attributes, but it's helpful to ensure\n # that _all_ the weights are loaded into the new attributes and we're not\n # making an incorrect assumption that this model should be converted when\n # it really shouldn't be.\n del module.to_q\n del module.to_k\n del module.to_v\n del module.to_out\n\n def _undo_temp_convert_self_to_deprecated_attention_blocks(self):\n deprecated_attention_block_modules = []\n\n def recursive_find_attn_block(module):\n if hasattr(module, \"_from_deprecated_attn_block\") and module._from_deprecated_attn_block:\n deprecated_attention_block_modules.append(module)\n\n for sub_module in module.children():\n recursive_find_attn_block(sub_module)\n\n recursive_find_attn_block(self)\n\n for module in deprecated_attention_block_modules:\n module.to_q = module.query\n 
module.to_k = module.key\n module.to_v = module.value\n module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)])\n\n del module.query\n del module.key\n del module.value\n del module.proj_attn" } ]
from dataclasses import dataclass
from typing import Optional
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
import torch
20,040
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    The output of [`TransformerTemporalModel`].

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`):
            The hidden states output conditioned on `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    The output of [`TransformerTemporalModel`].

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`):
            The hidden states output conditioned on `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
0
2023-11-27 18:46:55+00:00
24k
JiahuiLei/GART
solver.py
[ { "identifier": "prepare_real_seq", "path": "lib_data/get_data.py", "snippet": "def prepare_real_seq(\n seq_name,\n dataset_mode,\n split=\"train\",\n image_zoom_ratio=0.5,\n balance=False,\n ins_avt_wild_start_end_skip=None,\n):\n logging.info(\"Prepare real seq: {}\".format(seq_name))\n # * Get dataset\n if dataset_mode == \"ubcfashion\":\n dataset = UBCFasionDataset(\n data_root=\"./data/ubcfashion/\",\n video_list=[seq_name],\n image_zoom_ratio=image_zoom_ratio,\n start_end_skip=ins_avt_wild_start_end_skip,\n )\n elif dataset_mode == \"people_snapshot\":\n dataset = InstantAvatarDataset(\n noisy_flag=False,\n data_root=\"./data/people_snapshot/\",\n video_name=seq_name,\n split=split,\n image_zoom_ratio=image_zoom_ratio,\n )\n print(\"Load Instant Avatar processed PeopleSnapshot\")\n elif dataset_mode == \"zju\":\n dataset = ZJUDataset(\n data_root=\"./data/zju_mocap\",\n video_name=seq_name,\n split=split,\n image_zoom_ratio=image_zoom_ratio,\n )\n elif dataset_mode == \"instant_avatar_wild\":\n # assert image_zoom_ratio == 1.0, \"Check! in the wild data should use 1.0\"\n if image_zoom_ratio != 1.0:\n logging.warning(\n f\"Check! in the wild data should use 1.0, but got {image_zoom_ratio}\"\n )\n dataset = InstantAvatarWildDataset(\n data_root=\"./data/insav_wild\",\n video_name=seq_name,\n split=split,\n image_zoom_ratio=image_zoom_ratio,\n start_end_skip=ins_avt_wild_start_end_skip,\n )\n elif dataset_mode == \"dog_demo\":\n dataset = DogDemoDataset(data_root=\"./data/dog_data_official/\", video_name=seq_name)\n else:\n raise NotImplementedError(\"Unknown mode: {}\".format(dataset_mode))\n\n # prepare an optimizable data provider\n optimizable_data_provider = RealDataOptimizablePoseProviderPose(\n dataset,\n balance=balance,\n )\n return optimizable_data_provider, dataset" }, { "identifier": "DatabasePoseProvider", "path": "lib_data/data_provider.py", "snippet": "class DatabasePoseProvider(nn.Module):\n def __init__(\n self,\n pose_dirs: list,\n da_pose_prob=0.1,\n da_range=[0.0, np.pi / 4],\n device=torch.device(\"cuda\"),\n ) -> None:\n super().__init__()\n self.device = device\n self.base_R = matrix_to_axis_angle(\n torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, \"sxyz\"))[None]\n )[0]\n self.base_R = self.base_R.float().to(self.device)\n\n self.da_pose_prob = da_pose_prob\n self.da_range = da_range\n\n self.data = []\n\n # cache the poses\n for d in pose_dirs:\n print(f\"Caching {d} ...\")\n for subject in tqdm(os.listdir(d)):\n sub_dir = os.path.join(d, subject)\n if not os.path.isdir(sub_dir):\n continue\n npz_files = [f for f in os.listdir(sub_dir) if f.endswith(\".npz\")]\n npz_files.sort()\n for fn in npz_files:\n try:\n npz_fn = os.path.join(sub_dir, fn)\n pose_data = np.load(npz_fn)\n amass_len = pose_data[\"poses\"].shape[0]\n smplx_to_smpl = list(range(66)) + [72, 73, 74, 117, 118, 119]\n poses = pose_data[\"poses\"][:, smplx_to_smpl].reshape(\n amass_len, 24, 3\n )\n self.data.append(poses.astype(np.float16))\n except:\n # print(f\"Error in {npz_fn}, skip!\")\n pass\n self.data = np.concatenate(self.data, axis=0)\n print(\n f\"Database has poses {len(self.data)} with DA-pose prob {self.da_pose_prob} and range {self.da_range}\"\n )\n return\n\n def forward(self, N: int):\n pose, trans = self.sample_pose(N)\n return pose, trans\n\n def sample_pose(self, N: int):\n # da pose\n pose_list = []\n for i in range(N):\n seed = np.random.rand()\n if seed > self.da_pose_prob:\n # from database\n idx = np.random.randint(len(self.data))\n pose = 
torch.from_numpy(self.data[idx]).float().to(self.device)\n else:\n # da pose\n pose = torch.zeros(24, 3).to(self.device)\n da_theta = float(np.random.uniform(*self.da_range))\n pose[1, -1] = da_theta\n pose[2, -1] = -da_theta\n pose[0] = self.base_R\n pose_list.append(pose)\n pose = torch.stack(pose_list, dim=0)\n trans = torch.zeros(N, 3).to(self.device)\n return pose, trans" }, { "identifier": "get_template", "path": "lib_gart/templates.py", "snippet": "def get_template(\n mode, init_beta, cano_pose_type, voxel_deformer_res, template_model_path=None\n):\n if mode == \"human\":\n template = SMPLTemplate(\n smpl_model_path=template_model_path,\n init_beta=init_beta,\n cano_pose_type=cano_pose_type,\n voxel_deformer_res=voxel_deformer_res,\n )\n elif mode == \"dog\":\n template = SMALTemplate(\n init_beta=init_beta,\n cano_pose_type=cano_pose_type,\n voxel_deformer_res=voxel_deformer_res,\n )\n else:\n raise ValueError(f\"Unknown mode {mode}\")\n return template" }, { "identifier": "GaussianTemplateModel", "path": "lib_gart/model.py", "snippet": "class GaussianTemplateModel(nn.Module):\n def __init__(\n self,\n template,\n add_bones: AdditionalBones,\n ##################################\n # attr config\n w_correction_flag=True,\n # w_rest_dim=0, # additional skinnign weight\n f_localcode_dim=0,\n max_sph_order=0,\n w_memory_type=\"point\",\n ##################################\n max_scale=0.1, # use sigmoid activation, can't be too large\n min_scale=0.0,\n # geo init\n init_mode=\"on_mesh\",\n opacity_init_value=0.9, # the init value of opacity\n # on mesh init params\n onmesh_init_subdivide_num=0,\n onmesh_init_scale_factor=1.0,\n onmesh_init_thickness_factor=0.5,\n # near mesh init params\n scale_init_value=0.01, # the init value of scale\n nearmesh_init_num=10000,\n nearmesh_init_std=0.1,\n ##################################\n ) -> None:\n super().__init__()\n\n self.template = template\n self.num_bones = template.voxel_deformer.num_bones\n self.add_bones = add_bones\n self.num_add_bones = add_bones.num_bones\n\n self.max_scale = max_scale\n self.min_scale = min_scale\n self._init_act(self.max_scale, self.min_scale)\n self.opacity_init_logit = self.o_inv_act(opacity_init_value)\n\n # * init geometry\n if init_mode == \"on_mesh\":\n x, q, s, o = get_on_mesh_init_geo_values(\n template,\n on_mesh_subdivide=onmesh_init_subdivide_num,\n scale_init_factor=onmesh_init_scale_factor,\n thickness_init_factor=onmesh_init_thickness_factor,\n max_scale=max_scale,\n min_scale=min_scale,\n s_inv_act=self.s_inv_act,\n opacity_init_logit=self.opacity_init_logit,\n )\n elif init_mode == \"near_mesh\":\n self.scale_init_logit = self.s_inv_act(scale_init_value)\n x, q, s, o = get_near_mesh_init_geo_values(\n template,\n scale_base_logit=self.scale_init_logit,\n opacity_base_logit=self.opacity_init_logit,\n random_init_num=nearmesh_init_num,\n random_init_std=nearmesh_init_std,\n )\n elif init_mode == \"in_mesh\":\n self.scale_init_logit = self.s_inv_act(scale_init_value)\n x, q, s, o = get_inside_mesh_init_geo_values(\n template,\n scale_base_logit=self.scale_init_logit,\n opacity_base_logit=self.opacity_init_logit,\n random_init_num=nearmesh_init_num,\n )\n else:\n raise NotImplementedError(f\"Unknown init_mode {init_mode}\")\n self._xyz = nn.Parameter(x)\n self._rotation = nn.Parameter(q)\n self._scaling = nn.Parameter(s)\n self._opacity = nn.Parameter(o)\n\n # * init attributes\n self.w_memory_type = w_memory_type\n assert self.w_memory_type in [\"point\", \"voxel\"], f\"Unknown {w_memory_type}\"\n\n 
self.max_sph_order = max_sph_order\n self.w_dc_dim = self.template.dim if w_correction_flag else 0\n self.w_rest_dim = self.add_bones.num_bones\n self.f_localcode_dim = f_localcode_dim\n\n sph_rest_dim = 3 * (sph_order2nfeat(self.max_sph_order) - 1)\n self._features_dc = nn.Parameter(torch.zeros_like(self._xyz))\n self._features_rest = nn.Parameter(torch.zeros(self.N, sph_rest_dim))\n\n # * Different implementation of smoothness\n if self.w_memory_type == \"point\":\n self._w_correction_dc = nn.Parameter(torch.zeros(self.N, self.w_dc_dim))\n self._w_correction_rest = nn.Parameter(\n torch.ones(self.N, self.w_rest_dim) * 1e-4\n )\n elif self.w_memory_type == \"voxel\":\n self._w_correction_dc = nn.Parameter(torch.zeros(self.N, 0))\n self._w_correction_rest = nn.Parameter(torch.zeros(self.N, 0))\n if self.w_dc_dim > 0:\n self.template.voxel_deformer.enable_voxel_correction()\n if self.w_rest_dim > 0:\n self.template.voxel_deformer.enable_additional_correction(\n self.w_rest_dim\n )\n elif self.w_memory_type == \"hash\":\n raise NotImplementedError(\"TODO\")\n else:\n raise NotImplementedError(f\"Unknown {w_memory_type}\")\n\n self._features_localcode = nn.Parameter(\n torch.zeros(self.N, self.f_localcode_dim)\n )\n\n assert self.f_localcode_dim == 0, \"TODO, add local mlp ablation\"\n\n # * States\n # warning, our code use N, instead of (N,1) as in GS code\n self.register_buffer(\"xyz_gradient_accum\", torch.zeros(self.N).float())\n self.register_buffer(\"xyz_gradient_denom\", torch.zeros(self.N).long())\n self.register_buffer(\"max_radii2D\", torch.zeros(self.N).float())\n\n self.op_update_exclude = [\"add_bones\"]\n if self.w_memory_type != \"point\":\n self.op_update_exclude.extend([\"w_dc_vox\", \"w_rest_vox\"])\n # self.summary()\n return\n\n def summary(self):\n # logging.info number of parameters per pytorch sub module\n msg = \"\"\n for name, param in self.named_parameters():\n if name.startswith(\"add_bones\"):\n continue # compact print\n msg = msg + f\"[{name}:{param.numel()/1e3:.1f}K] \" \n # logging.info(f\"{name}, {param.numel()/1e6:.3f}M\")\n logging.info(msg)\n return\n\n def _init_act(self, max_s_value, min_s_value):\n def s_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n return min_s_value + torch.sigmoid(x) * (max_s_value - min_s_value)\n\n def s_inv_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n y = (x - min_s_value) / (max_s_value - min_s_value) + 1e-5\n y = torch.logit(y)\n assert not torch.isnan(\n y\n ).any(), f\"{x.min()}, {x.max()}, {y.min()}, {y.max()}\"\n return y\n\n def o_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n return torch.sigmoid(x)\n\n def o_inv_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n return torch.logit(x)\n\n self.s_act = s_act\n self.s_inv_act = s_inv_act\n self.o_act = o_act\n self.o_inv_act = o_inv_act\n\n return\n\n @property\n def N(self):\n return len(self._xyz)\n\n @property\n def get_x(self):\n return self._xyz\n\n @property\n def get_R(self):\n return quaternion_to_matrix(self._rotation)\n\n @property\n def get_o(self):\n return self.o_act(self._opacity)\n\n @property\n def get_s(self):\n return self.s_act(self._scaling)\n\n @property\n def get_c(self):\n return torch.cat([self._features_dc, self._features_rest], dim=-1)\n\n def cache_for_fast(self):\n _cached_W, _ = self.template.forward(None, self._xyz)\n self._cached_W = _cached_W.detach().clone()\n return\n\n def forward(\n self, theta, trans, additional_dict={}, active_sph_order=None, 
fast=False\n ):\n # * fast will use the cached per point attr, no query anymore\n # TODO: the additional dict contain info to do flexible skinning: it can contain the As directly for optimization, or it can contain t index to query some buffers to provide As, or it can contain t along with the input theta to query some MLP;\n\n # TODO: if use vol memory, every forward update self.xxx, and remove them from parameters, pretend that the attributes are per point, but actually they are queried every forward\n\n # theta: B,24,3; trans: B,3\n B = len(theta)\n if active_sph_order is None:\n active_sph_order = self.max_sph_order\n else:\n assert (\n active_sph_order <= self.max_sph_order\n ), \"active_sph_order should be smaller\"\n sph_dim = 3 * sph_order2nfeat(active_sph_order)\n\n xyz = self.get_x\n mu_can = xyz\n frame_can = self.get_R\n s = self.get_s\n o = self.get_o\n sph = self.get_c[:, :sph_dim]\n\n mu_can = mu_can[None].expand(B, -1, -1)\n frame_can = frame_can[None].expand(B, -1, -1, -1)\n\n if fast:\n # only forward skeleton, no query voxel\n _, A = self.template.forward(theta, None)\n W = self._cached_W[None].expand(B, -1, -1)\n else:\n W, A = self.template.forward(theta, mu_can)\n if self._w_correction_dc.shape[-1] > 0:\n W = W + self._w_correction_dc[None]\n T = torch.einsum(\"bnj, bjrc -> bnrc\", W[..., : self.num_bones], A)\n\n # * additional correction here\n if \"pose\" not in additional_dict.keys():\n # maybe later we want to viz the different pose effect in cano\n additional_dict[\"pose\"] = theta.reshape(B, -1)[:, 3:]\n add_A = self.add_bones(**additional_dict)\n if add_A is not None:\n if theta.ndim == 2:\n global_axis_angle = theta[:, :3]\n else:\n global_axis_angle = theta[:, 0]\n global_orient_action = self.template.get_rot_action(global_axis_angle) # B,4,4\n add_A = torch.einsum(\"bij, bnjk -> bnik\", global_orient_action, add_A)\n\n if self.w_memory_type == \"point\":\n assert self._w_correction_rest.shape[-1] > 0\n add_W = self._w_correction_rest[None].expand(B, -1, -1)\n elif self.w_memory_type == \"voxel\":\n add_W = W[..., self.num_bones :]\n\n add_T = torch.einsum(\"bnj, bjrc -> bnrc\", add_W, add_A)\n T = T + add_T # Linear\n additional_dict[\"As\"] = add_A\n\n R, t = T[:, :, :3, :3], T[:, :, :3, 3] # B,N,3,3; B,N,3\n\n mu = torch.einsum(\"bnij,bnj->bni\", R, mu_can) + t # B,N,3\n frame = torch.einsum(\"bnij,bnjk->bnik\", R, frame_can) # B,N,3,3\n\n s = s[None].expand(B, -1, -1) # B,N,1\n o = o[None].expand(B, -1, -1) # B,N,1\n sph = sph[:, :sph_dim][None].expand(B, -1, -1) # B,N,C\n\n mu = mu + trans[:, None, :]\n\n return mu, frame, s, o, sph, additional_dict\n\n def compute_reg(self, K):\n # !can cancel the knn, but the w reg is critical\n if K > 0:\n xyz = self._xyz\n # todo: this can be cached and updated every several steps!!\n dist_sq, nn_ind, _ = knn_points(xyz[None], xyz[None], K=K, return_nn=False)\n nn_ind = nn_ind.squeeze(0)\n # reg the std inside knn\n q = self._rotation[nn_ind, :] # N,K,4\n s = self.get_s[nn_ind, :] # N,K,3\n o = self.get_o[nn_ind, :] # N,K,1\n q_std = q.std(dim=1).mean()\n s_std = s.std(dim=1).mean()\n o_std = o.std(dim=1).mean()\n\n cd = self._features_dc[nn_ind, :] # N,K,3\n ch = self._features_rest[nn_ind, :] # N,K,C\n cd_std = cd.std(dim=1).mean()\n ch_std = ch.std(dim=1).mean()\n if ch.shape[-1] == 0:\n ch_std = torch.zeros_like(ch_std)\n\n w = self._w_correction_dc[nn_ind, :] # N,K,3\n w_rest = self._w_correction_rest[nn_ind, :] # N,K,C\n f = self._features_localcode[nn_ind, :] # N,K,C\n w_std = w.std(dim=1).mean()\n w_rest_std 
= w_rest.std(dim=1).mean()\n f_std = f.std(dim=1).mean()\n if w.shape[-1] == 0:\n w_std = torch.zeros_like(cd_std)\n if w_rest.shape[-1] == 0:\n w_rest_std = torch.zeros_like(cd_std)\n if f.shape[-1] == 0:\n f_std = torch.zeros_like(cd_std)\n else:\n dummy = torch.zeros(1).to(self._xyz).squeeze()\n q_std, s_std, o_std = dummy, dummy, dummy\n cd_std, ch_std = dummy, dummy\n w_std, w_rest_std, f_std = dummy, dummy, dummy\n dist_sq = dummy\n\n w_norm = self._w_correction_dc.norm(dim=-1).mean() # N\n w_rest_norm = self._w_correction_rest.norm(dim=-1).mean() # N\n\n if self.w_memory_type == \"voxel\":\n # update the w related std and norm\n w_std = self.template.voxel_deformer.get_tv(\"dc\")\n w_rest_std = self.template.voxel_deformer.get_tv(\"rest\")\n w_norm = self.template.voxel_deformer.get_mag(\"dc\")\n w_rest_norm = self.template.voxel_deformer.get_mag(\"rest\")\n\n max_s_square = torch.mean((self.get_s.max(dim=1).values) ** 2)\n\n return (\n q_std,\n s_std,\n o_std,\n cd_std,\n ch_std,\n w_std,\n w_rest_std,\n f_std,\n w_norm,\n w_rest_norm,\n dist_sq.mean(),\n max_s_square,\n )\n\n def get_optimizable_list(\n self,\n lr_p=0.00016,\n lr_q=0.001,\n lr_s=0.005,\n lr_o=0.05,\n lr_sph=0.0025,\n lr_sph_rest=None,\n lr_w=0.001,\n lr_w_rest=0.001,\n lr_f=0.0001,\n ):\n lr_sph_rest = lr_sph / 20 if lr_sph_rest is None else lr_sph_rest\n l = [\n {\"params\": [self._xyz], \"lr\": lr_p, \"name\": \"xyz\"},\n {\"params\": [self._opacity], \"lr\": lr_o, \"name\": \"opacity\"},\n {\"params\": [self._scaling], \"lr\": lr_s, \"name\": \"scaling\"},\n {\"params\": [self._rotation], \"lr\": lr_q, \"name\": \"rotation\"},\n {\"params\": [self._features_dc], \"lr\": lr_sph, \"name\": \"f_dc\"},\n {\"params\": [self._features_rest], \"lr\": lr_sph_rest, \"name\": \"f_rest\"},\n {\"params\": [self._w_correction_dc], \"lr\": lr_w, \"name\": \"w_dc\"},\n {\"params\": [self._w_correction_rest], \"lr\": lr_w_rest, \"name\": \"w_rest\"},\n {\"params\": [self._features_localcode], \"lr\": lr_f, \"name\": \"f_localcode\"},\n ]\n if self.w_memory_type == \"voxel\":\n if self.w_dc_dim > 0:\n l.append(\n {\n \"params\": [self.template.voxel_deformer.voxel_w_correction],\n \"lr\": lr_w,\n \"name\": \"w_dc_vox\",\n }\n )\n if self.w_rest_dim > 0:\n l.append(\n {\n \"params\": [self.template.voxel_deformer.additional_correction],\n \"lr\": lr_w_rest,\n \"name\": \"w_rest_vox\",\n }\n )\n return l\n\n # * Gaussian Control\n def record_xyz_grad_radii(self, viewspace_point_tensor, radii, update_filter):\n # Record the gradient norm, invariant across different poses\n assert len(viewspace_point_tensor) == self.N\n self.xyz_gradient_accum[update_filter] += torch.norm(\n viewspace_point_tensor.grad[update_filter, :2], dim=-1, keepdim=False\n )\n self.xyz_gradient_denom[update_filter] += 1\n self.max_radii2D[update_filter] = torch.max(\n self.max_radii2D[update_filter], radii[update_filter]\n )\n return\n\n def _densification_postprocess(\n self,\n optimizer,\n new_xyz,\n new_r,\n new_s,\n new_o,\n new_sph_dc,\n new_sph_rest,\n new_w_dc,\n new_w_rest,\n new_localcode,\n ):\n d = {\n \"xyz\": new_xyz,\n \"f_dc\": new_sph_dc,\n \"f_rest\": new_sph_rest,\n \"opacity\": new_o,\n \"scaling\": new_s,\n \"rotation\": new_r,\n \"w_dc\": new_w_dc,\n \"w_rest\": new_w_rest,\n \"f_localcode\": new_localcode,\n }\n d = {k: v for k, v in d.items() if v is not None}\n\n # First cat to optimizer and then return to self\n optimizable_tensors = cat_tensors_to_optimizer(optimizer, d)\n\n self._xyz = optimizable_tensors[\"xyz\"]\n self._opacity = 
optimizable_tensors[\"opacity\"]\n self._scaling = optimizable_tensors[\"scaling\"]\n self._rotation = optimizable_tensors[\"rotation\"]\n self._features_dc = optimizable_tensors[\"f_dc\"]\n self._features_rest = optimizable_tensors[\"f_rest\"]\n self._w_correction_dc = optimizable_tensors[\"w_dc\"]\n self._w_correction_rest = optimizable_tensors[\"w_rest\"]\n self._features_localcode = optimizable_tensors[\"f_localcode\"]\n\n self.xyz_gradient_accum = torch.zeros(self._xyz.shape[0], device=\"cuda\")\n self.xyz_gradient_denom = torch.zeros(self._xyz.shape[0], device=\"cuda\")\n self.max_radii2D = torch.cat(\n [self.max_radii2D, torch.zeros_like(new_xyz[:, 0])], dim=0\n )\n return\n\n def _densify_and_clone(self, optimizer, grad_norm, grad_threshold, scale_th):\n # Extract points that satisfy the gradient condition\n # padding for enabling both call of clone and split\n padded_grad = torch.zeros((self.N), device=\"cuda\")\n padded_grad[: grad_norm.shape[0]] = grad_norm.squeeze()\n selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)\n selected_pts_mask = torch.logical_and(\n selected_pts_mask,\n torch.max(self.get_s, dim=1).values <= scale_th,\n )\n if selected_pts_mask.sum() == 0:\n return 0\n\n new_xyz = self._xyz[selected_pts_mask]\n new_rotation = self._rotation[selected_pts_mask]\n new_scaling = self._scaling[selected_pts_mask]\n new_opacities = self._opacity[selected_pts_mask]\n new_features_dc = self._features_dc[selected_pts_mask]\n new_features_rest = self._features_rest[selected_pts_mask]\n new_w_dc = self._w_correction_dc[selected_pts_mask]\n new_w_rest = self._w_correction_rest[selected_pts_mask]\n new_localcode = self._features_localcode[selected_pts_mask]\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz,\n new_r=new_rotation,\n new_s=new_scaling,\n new_o=new_opacities,\n new_sph_dc=new_features_dc,\n new_sph_rest=new_features_rest,\n new_w_dc=new_w_dc,\n new_w_rest=new_w_rest,\n new_localcode=new_localcode,\n )\n\n return len(new_xyz)\n\n def _densify_and_split(\n self,\n optimizer,\n grad_norm,\n grad_threshold,\n scale_th,\n N=2,\n ):\n # Extract points that satisfy the gradient condition\n _scaling = self.get_s\n # padding for enabling both call of clone and split\n padded_grad = torch.zeros((self.N), device=\"cuda\")\n padded_grad[: grad_norm.shape[0]] = grad_norm.squeeze()\n selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)\n selected_pts_mask = torch.logical_and(\n selected_pts_mask,\n torch.max(_scaling, dim=1).values > scale_th,\n )\n if selected_pts_mask.sum() == 0:\n return 0\n\n stds = _scaling[selected_pts_mask].repeat(N, 1)\n means = torch.zeros((stds.size(0), 3), device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = quaternion_to_matrix(self._rotation[selected_pts_mask]).repeat(N, 1, 1)\n new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self._xyz[\n selected_pts_mask\n ].repeat(N, 1)\n new_scaling = _scaling[selected_pts_mask].repeat(N, 1) / (0.8 * N)\n new_scaling = torch.clamp(new_scaling, max=self.max_scale, min=self.min_scale)\n new_scaling = self.s_inv_act(new_scaling)\n new_rotation = self._rotation[selected_pts_mask].repeat(N, 1)\n new_features_dc = self._features_dc[selected_pts_mask].repeat(N, 1)\n new_features_rest = self._features_rest[selected_pts_mask].repeat(N, 1)\n new_opacities = self._opacity[selected_pts_mask].repeat(N, 1)\n new_w_dc = self._w_correction_dc[selected_pts_mask].repeat(N, 1)\n new_w_rest = 
self._w_correction_rest[selected_pts_mask].repeat(N, 1)\n new_localcode = self._features_localcode[selected_pts_mask].repeat(N, 1)\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz,\n new_r=new_rotation,\n new_s=new_scaling,\n new_o=new_opacities,\n new_sph_dc=new_features_dc,\n new_sph_rest=new_features_rest,\n new_w_dc=new_w_dc,\n new_w_rest=new_w_rest,\n new_localcode=new_localcode,\n )\n\n prune_filter = torch.cat(\n (\n selected_pts_mask,\n torch.zeros(N * selected_pts_mask.sum(), device=\"cuda\", dtype=bool),\n )\n )\n self._prune_points(optimizer, prune_filter)\n return len(new_xyz)\n\n def densify(self, optimizer, max_grad, percent_dense, extent, verbose=True):\n grads = self.xyz_gradient_accum / self.xyz_gradient_denom\n grads[grads.isnan()] = 0.0\n\n # n_clone = self._densify_and_clone(optimizer, grads, max_grad)\n n_clone = self._densify_and_clone(\n optimizer, grads, max_grad, percent_dense * extent\n )\n n_split = self._densify_and_split(\n optimizer, grads, max_grad, percent_dense * extent, N=2\n )\n\n if verbose:\n logging.info(f\"Densify: Clone[+] {n_clone}, Split[+] {n_split}\")\n # logging.info(f\"Densify: Clone[+] {n_clone}\")\n # torch.cuda.empty_cache()\n return\n\n def random_grow(self, optimizer, num_factor=0.05, std=0.1, init_opa_value=0.1):\n # * New operation, randomly add largely disturbed points to the geometry\n ind = torch.randperm(self.N)[: int(self.N * num_factor)]\n selected_pts_mask = torch.zeros(self.N, dtype=bool, device=\"cuda\")\n selected_pts_mask[ind] = True\n\n new_xyz = self._xyz[selected_pts_mask]\n noise = torch.randn_like(new_xyz) * std\n new_xyz = new_xyz + noise\n new_features_dc = self._features_dc[selected_pts_mask]\n new_features_rest = self._features_rest[selected_pts_mask]\n\n new_opacities = torch.ones_like(self._opacity[selected_pts_mask])\n new_opacities = new_opacities * self.o_inv_act(init_opa_value)\n\n new_scaling = self._scaling[selected_pts_mask]\n new_rotation = self._rotation[selected_pts_mask]\n\n new_w_dc = self._w_correction_dc[selected_pts_mask]\n new_w_rest = self._w_correction_rest[selected_pts_mask]\n new_localcode = self._features_localcode[selected_pts_mask]\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz,\n new_r=new_rotation,\n new_s=new_scaling,\n new_o=new_opacities,\n new_sph_dc=new_features_dc,\n new_sph_rest=new_features_rest,\n new_w_dc=new_w_dc,\n new_w_rest=new_w_rest,\n new_localcode=new_localcode,\n )\n logging.info(f\"Random grow: {len(new_xyz)}\")\n return len(new_xyz)\n\n def prune_points(self, optimizer, min_opacity, max_screen_size, verbose=True):\n opacity = self.o_act(self._opacity)\n prune_mask = (opacity < min_opacity).squeeze()\n if max_screen_size: # if a point is too large\n big_points_vs = self.max_radii2D > max_screen_size\n prune_mask = torch.logical_or(prune_mask, big_points_vs)\n # * reset the maxRadii\n self.max_radii2D = torch.zeros_like(self.max_radii2D)\n self._prune_points(optimizer, prune_mask)\n if verbose:\n logging.info(f\"Prune: {prune_mask.sum()}\")\n\n def _prune_points(self, optimizer, mask):\n valid_points_mask = ~mask\n optimizable_tensors = prune_optimizer(\n optimizer,\n valid_points_mask,\n exclude_names=self.op_update_exclude,\n )\n\n self._xyz = optimizable_tensors[\"xyz\"]\n if getattr(self, \"color_memory\", None) is None:\n self._features_dc = optimizable_tensors[\"f_dc\"]\n self._features_rest = optimizable_tensors[\"f_rest\"]\n self._opacity = optimizable_tensors[\"opacity\"]\n self._scaling = optimizable_tensors[\"scaling\"]\n 
self._rotation = optimizable_tensors[\"rotation\"]\n self._w_correction_dc = optimizable_tensors[\"w_dc\"]\n self._w_correction_rest = optimizable_tensors[\"w_rest\"]\n self._features_localcode = optimizable_tensors[\"f_localcode\"]\n\n self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]\n self.xyz_gradient_denom = self.xyz_gradient_denom[valid_points_mask]\n self.max_radii2D = self.max_radii2D[valid_points_mask]\n # torch.cuda.empty_cache()\n return\n\n @torch.no_grad()\n def regaussian(self, optimizer, max_scale=0.03):\n # raise NotImplementedError(\"TODO, like split\")\n # * New operation, manually split the large gaussians with smaller ones to approximate\n # * Now, try bi-split\n\n # Extract points that satisfy the gradient condition\n _scaling = self.get_s\n selected_pts_mask = torch.max(_scaling, dim=1).values > max_scale\n\n step = 0\n before_num = self.N\n while selected_pts_mask.any():\n # This can be done more than 3 times, becuase there may be huge gaussians, which should be devided several times\n fg_xyz = self._xyz[selected_pts_mask]\n fg_scale = _scaling[selected_pts_mask]\n fg_frame = quaternion_to_matrix(self._rotation[selected_pts_mask])\n # each column is the direction of axis in global frame\n axis_ind = torch.argmax(fg_scale, dim=1)\n axis_scale = fg_scale.max(dim=1).values\n # select column\n axis_dir = torch.gather(\n fg_frame, dim=2, index=axis_ind[:, None, None].expand(-1, 3, -1)\n ).squeeze(\n -1\n ) # N,3\n new_x1 = fg_xyz + axis_dir.squeeze() * axis_scale[:, None] / 2.0\n new_x2 = fg_xyz - axis_dir.squeeze() * axis_scale[:, None] / 2.0\n # Repeat will change [1,2,3...] to [1,2,3..., 1,2,3...]\n new_xyz = torch.cat([new_x1, new_x2], dim=0).reshape(-1, 3)\n new_scaling = _scaling[selected_pts_mask]\n new_scaling = torch.scatter(\n new_scaling,\n dim=1,\n index=axis_ind[:, None],\n src=axis_scale[:, None] / 2.0,\n ).repeat(2, 1)\n new_scaling = torch.clamp(\n new_scaling, max=self.max_scale, min=self.min_scale\n )\n new_scaling = self.s_inv_act(new_scaling)\n new_rotation = self._rotation[selected_pts_mask].repeat(2, 1)\n new_features_dc = self._features_dc[selected_pts_mask].repeat(2, 1)\n new_features_rest = self._features_rest[selected_pts_mask].repeat(2, 1)\n new_opacities = self._opacity[selected_pts_mask].repeat(2, 1)\n new_w_dc = self._w_correction_dc[selected_pts_mask].repeat(2, 1)\n new_w_rest = self._w_correction_rest[selected_pts_mask].repeat(2, 1)\n new_localcode = self._features_localcode[selected_pts_mask].repeat(2, 1)\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz.float(),\n new_r=new_rotation.float(),\n new_s=new_scaling.float(),\n new_o=new_opacities.float(),\n new_sph_dc=new_features_dc.float(),\n new_sph_rest=new_features_rest.float(),\n new_w_dc=new_w_dc.float(),\n new_w_rest=new_w_rest.float(),\n new_localcode=new_localcode.float(),\n )\n\n prune_filter = torch.cat(\n (\n selected_pts_mask,\n torch.zeros(2 * selected_pts_mask.sum(), device=\"cuda\", dtype=bool),\n )\n )\n self._prune_points(optimizer, prune_filter)\n\n step += 1\n logging.info(\n f\"Regaussian-[{step}], {selected_pts_mask.sum()} ({selected_pts_mask.float().mean()*100}% pts-scale>{max_scale})\"\n )\n\n _scaling = self.get_s\n selected_pts_mask = torch.max(_scaling, dim=1).values > max_scale\n logging.info(f\"Re-gaussian: {before_num} -> {self.N}\")\n return\n\n def reset_opacity(self, optimizer, value=0.01, verbose=True):\n opacities_new = self.o_inv_act(\n torch.min(self.o_act(self._opacity), torch.ones_like(self._opacity) * value)\n )\n 
optimizable_tensors = replace_tensor_to_optimizer(\n optimizer, opacities_new, \"opacity\"\n )\n if verbose:\n logging.info(f\"Reset opacity to {value}\")\n self._opacity = optimizable_tensors[\"opacity\"]\n\n def load(self, ckpt):\n # because N changed, have to re-init the buffers\n self._xyz = nn.Parameter(torch.as_tensor(ckpt[\"_xyz\"], dtype=torch.float32))\n\n self._features_dc = nn.Parameter(\n torch.as_tensor(ckpt[\"_features_dc\"], dtype=torch.float32)\n )\n self._features_rest = nn.Parameter(\n torch.as_tensor(ckpt[\"_features_rest\"], dtype=torch.float32)\n )\n self._opacity = nn.Parameter(\n torch.as_tensor(ckpt[\"_opacity\"], dtype=torch.float32)\n )\n self._scaling = nn.Parameter(\n torch.as_tensor(ckpt[\"_scaling\"], dtype=torch.float32)\n )\n self._rotation = nn.Parameter(\n torch.as_tensor(ckpt[\"_rotation\"], dtype=torch.float32)\n )\n self._w_correction_dc = nn.Parameter(\n torch.as_tensor(ckpt[\"_w_correction_dc\"], dtype=torch.float32)\n )\n self._w_correction_rest = nn.Parameter(\n torch.as_tensor(ckpt[\"_w_correction_rest\"], dtype=torch.float32)\n )\n self._features_localcode = nn.Parameter(\n torch.as_tensor(ckpt[\"_features_localcode\"], dtype=torch.float32)\n )\n self.xyz_gradient_accum = torch.as_tensor(\n ckpt[\"xyz_gradient_accum\"], dtype=torch.float32\n )\n self.xyz_gradient_denom = torch.as_tensor(\n ckpt[\"xyz_gradient_denom\"], dtype=torch.int64\n )\n self.max_radii2D = torch.as_tensor(ckpt[\"max_radii2D\"], dtype=torch.float32)\n\n # * add bones may have different total_t\n if \"add_bones.dt_list\" in ckpt.keys():\n self.add_bones.total_t = ckpt[\"add_bones.dt_list\"].shape[0]\n self.add_bones.dt_list = nn.Parameter(\n torch.as_tensor(ckpt[\"add_bones.dt_list\"], dtype=torch.float32)\n )\n self.add_bones.dr_list = nn.Parameter(\n torch.as_tensor(ckpt[\"add_bones.dr_list\"], dtype=torch.float32)\n )\n # load others\n self.load_state_dict(ckpt, strict=True)\n # this is critical, reinit the funcs\n self._init_act(self.max_scale, self.min_scale)\n return" }, { "identifier": "AdditionalBones", "path": "lib_gart/model.py", "snippet": "class AdditionalBones(nn.Module):\n def __init__(\n self, # additional bones\n num_bones: int = 0,\n total_t: int = 0, # any usage of time should use this!\n mode=\"pose-mlp\",\n # pose-mlp\n pose_dim=23 * 3,\n mlp_hidden_dims=[256, 256, 256, 256],\n mlp_act=nn.LeakyReLU,\n # pose+t-mlp\n ):\n super().__init__()\n self.num_bones = num_bones\n if self.num_bones == 0:\n return\n self.mode = mode\n assert self.mode in [\"pose-mlp\", \"pose+t-mlp\", \"delta-list\", \"list\"]\n self.total_t = total_t\n\n if self.mode == \"pose-mlp\":\n self.pose_dim = pose_dim\n self.mlp_layers = nn.ModuleList()\n c_in = self.pose_dim\n for c_out in mlp_hidden_dims:\n self.mlp_layers.append(nn.Sequential(nn.Linear(c_in, c_out), mlp_act()))\n c_in = c_out\n self.mlp_output_head = nn.Linear(c_in, 7 * self.num_bones, bias=False)\n with torch.no_grad():\n self.mlp_output_head.weight.data = (\n torch.randn_like(self.mlp_output_head.weight.data) * 1e-3\n )\n elif self.mode == \"delta-list\":\n self.dr_list = nn.Parameter(torch.zeros(self.total_t, num_bones, 3))\n self.dt_list = nn.Parameter(torch.zeros(self.total_t, num_bones, 3))\n else:\n raise NotImplementedError()\n\n return\n\n def forward(self, pose=None, t=None, As=None):\n if self.num_bones == 0:\n # * No additional bones\n return None\n if As is not None:\n # * Directly return if As already provided\n return As\n if self.mode == \"pose-mlp\":\n assert pose is not None\n assert pose.ndim == 2 and 
pose.shape[1] == self.pose_dim\n B = len(pose)\n x = pose\n for layer in self.mlp_layers:\n x = layer(x)\n x = self.mlp_output_head(x).reshape(B, -1, 7)\n q, t = x[:, :, :4], x[:, :, 4:]\n q[..., 0] = q[..., 0] + 1.0\n q = F.normalize(q, dim=-1)\n R = quaternion_to_matrix(q)\n Rt = torch.cat([R, t[:, :, :, None]], dim=-1)\n bottom = torch.zeros_like(Rt[:, :, 0:1])\n bottom[:, :, :, -1] = 1.0\n As = torch.cat([Rt, bottom], dim=2)\n return As\n elif self.mode == \"delta-list\":\n As = self._roll_out_continuous_T()\n if t is None:\n B = len(pose)\n # # ! If no time is set, now return eye(4)\n # ret = (\n # torch.eye(4)\n # .to(As.device)[None, None]\n # .repeat(B, self.num_bones, 1, 1)\n # )\n # ! If no time is set, now return first frame\n ret = As[0][None].repeat(B, 1, 1, 1)\n else:\n if isinstance(t, int):\n t = torch.tensor([t]).to(As.device)\n ret = As[t]\n return ret\n else:\n raise NotImplementedError()\n\n return # As in canonical frame\n\n def _roll_out_continuous_T(self):\n # ! this assumes continuous frames, single frame!\n R = axis_angle_to_matrix(self.dr_list)\n dT = (\n torch.eye(4).to(R.device)[None, None].repeat(self.total_t, R.shape[1], 1, 1)\n )\n dT[:, :, :3, :3] = dT[:, :, :3, :3] * 0 + R\n dT[:, :, :3, 3] = dT[:, :, :3, 3] * 0 + self.dt_list\n T = [dT[0]]\n for i in range(1, self.total_t):\n T.append(torch.einsum(\"nij, njk->nik\", T[-1], dT[i]))\n T = torch.stack(T, dim=0)\n return T" }, { "identifier": "render_cam_pcl", "path": "lib_render/gauspl_renderer.py", "snippet": "def render_cam_pcl(\n xyz,\n frame,\n scale,\n opacity,\n color_feat,\n H,\n W,\n CAM_K,\n verbose=False,\n active_sph_order=0,\n bg_color=[1.0, 1.0, 1.0],\n):\n # ! Camera is at origin, every input is in camera coordinate space\n\n S = torch.zeros_like(frame)\n S[:, 0, 0] = scale[:, 0]\n S[:, 1, 1] = scale[:, 1]\n S[:, 2, 2] = scale[:, 2]\n actual_covariance = frame @ (S**2) @ frame.permute(0, 2, 1)\n\n # Create zero tensor. 
We will use it to make pytorch return gradients of the 2D (screen-space) means\n device = xyz.device\n screenspace_points = (\n torch.zeros_like(xyz, dtype=xyz.dtype, requires_grad=True, device=xyz.device) + 0\n )\n # screenspace_points.retain_grad()\n try:\n screenspace_points.retain_grad()\n except:\n pass\n\n # * Specially handle the non-centered camera, using first padding and finally crop\n if abs(H // 2 - CAM_K[1, 2]) > 1.0 or abs(W // 2 - CAM_K[0, 2]) > 1.0:\n center_handling_flag = True\n left_w, right_w = CAM_K[0, 2], W - CAM_K[0, 2]\n top_h, bottom_h = CAM_K[1, 2], H - CAM_K[1, 2]\n new_W = int(2 * max(left_w, right_w))\n new_H = int(2 * max(top_h, bottom_h))\n else:\n center_handling_flag = False\n new_W, new_H = W, H\n\n # Set up rasterization configuration\n FoVx = focal2fov(CAM_K[0, 0], new_W)\n FoVy = focal2fov(CAM_K[1, 1], new_H)\n tanfovx = math.tan(FoVx * 0.5)\n tanfovy = math.tan(FoVy * 0.5)\n\n # TODO: Check dynamic gaussian repos and original gaussian repo, they use projection matrix to handle non-centered K, not using this stupid padding like me\n viewmatrix = torch.from_numpy(getWorld2View2(np.eye(3), np.zeros(3)).transpose(0, 1)).to(device)\n projection_matrix = (\n getProjectionMatrix(znear=0.01, zfar=1.0, fovX=FoVx, fovY=FoVy).transpose(0, 1).to(device)\n )\n full_proj_transform = (viewmatrix.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))).squeeze(0)\n camera_center = viewmatrix.inverse()[3, :3]\n\n raster_settings = GaussianRasterizationSettings(\n image_height=new_H,\n image_width=new_W,\n tanfovx=tanfovx,\n tanfovy=tanfovy,\n bg=torch.tensor(bg_color, dtype=torch.float32, device=device),\n scale_modifier=1.0,\n viewmatrix=viewmatrix,\n projmatrix=full_proj_transform,\n sh_degree=0, # ! use pre-compute color!\n campos=camera_center,\n prefiltered=False,\n debug=False,\n )\n rasterizer = GaussianRasterizer(raster_settings=raster_settings)\n\n means3D = xyz\n means2D = screenspace_points\n # opacity = torch.ones_like(means3D[:, 0]) * sigma\n\n # If precomputed 3d covariance is provided, use it. If not, then it will be computed from\n # scaling / rotation by the rasterizer.\n scales = None\n rotations = None\n # JH\n cov3D_precomp = strip_lowerdiag(actual_covariance)\n\n # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors\n # from SHs in Python, do it. 
If not, then SH -> RGB conversion will be done by rasterizer.\n # xyz are in camera frame, so the dir in camera frame is just their normalized direction\n dir_cam = F.normalize(xyz, dim=-1)\n # P_w = Frame @ P_local\n dir_local = torch.einsum(\"nji,nj->ni\", frame, dir_cam) # note the transpose\n dir_local = F.normalize(\n dir_local, dim=-1\n ) # If frame is not SO(3) but Affinity, have to normalize\n N = len(color_feat)\n shs_view = color_feat.reshape(N, -1, 3) # N, Deg, Channels\n sh2rgb = eval_sh(active_sph_order, shs_view.permute(0, 2, 1), dir_local)\n colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)\n # colors_precomp = color_feat\n\n # Rasterize visible Gaussians to image, obtain their radii (on screen).\n\n start_time = time.time()\n ret = rasterizer(\n means3D=means3D.float(),\n means2D=means2D.float(),\n shs=None,\n colors_precomp=colors_precomp.float(),\n opacities=opacity.float(),\n scales=scales,\n rotations=rotations,\n cov3D_precomp=cov3D_precomp.float(),\n )\n if len(ret) == 2:\n rendered_image, radii = ret\n depth, alpha = None, None\n elif len(ret) == 4:\n rendered_image, radii, depth, alpha = ret\n else:\n raise ValueError(f\"Unexpected return value from rasterizer with len={len(ret)}\")\n if verbose:\n print(\n f\"render time: {(time.time() - start_time)*1000:.3f}ms\",\n )\n ret = {\n \"rgb\": rendered_image,\n \"dep\": depth,\n \"alpha\": alpha,\n \"viewspace_points\": screenspace_points,\n \"visibility_filter\": radii > 0,\n \"radii\": radii,\n }\n if center_handling_flag:\n for k in [\"rgb\", \"dep\", \"alpha\"]:\n if ret[k] is None:\n continue\n if left_w > right_w:\n ret[k] = ret[k][:, :, :W]\n else:\n ret[k] = ret[k][:, :, -W:]\n if top_h > bottom_h:\n ret[k] = ret[k][:, :H, :]\n else:\n ret[k] = ret[k][:, -H:, :]\n return ret" }, { "identifier": "transform_mu_frame", "path": "lib_gart/model_utils.py", "snippet": "def transform_mu_frame(mu, frame, T):\n if len(mu) != len(T):\n assert len(mu) == 1 and len(frame) == 1\n mu = mu.expand(len(T), -1, -1)\n frame = frame.expand(len(T), -1, -1, -1)\n R, t = T[:, :3, :3], T[:, :3, 3]\n new_frame = torch.einsum(\"bij, bnjk->bnik\", R, frame)\n new_mu = torch.einsum(\"bij, bnj->bni\", R, mu) + t[:, None]\n return new_mu, new_frame" }, { "identifier": "viz_render", "path": "utils/viz.py", "snippet": "def viz_render(gt_rgb, gt_mask, pred_pkg, save_path=None):\n pred_rgb = pred_pkg[\"rgb\"].permute(1, 2, 0)\n pred_mask = pred_pkg[\"alpha\"].squeeze(0)\n pred_depth = pred_pkg[\"dep\"].squeeze(0)\n fig = plt.figure(figsize=(20, 5))\n plt.subplot(1, 5, 1)\n plt.imshow(torch.clamp(gt_rgb, 0.0, 1.0).detach().cpu().numpy())\n plt.title(\"GT\"), plt.axis(\"off\")\n plt.subplot(1, 5, 2)\n plt.imshow(torch.clamp(pred_rgb, 0.0, 1.0).detach().cpu().numpy())\n plt.title(\"Pred view\"), plt.axis(\"off\")\n plt.subplot(1, 5, 3)\n error = torch.clamp(abs(pred_rgb - gt_rgb), 0.0, 1.0).detach().cpu().numpy().max(axis=-1)\n cmap = plt.imshow(error)\n plt.title(\"Render Error (max in rgb)\"), plt.axis(\"off\")\n plt.colorbar(cmap, shrink=0.8)\n\n plt.subplot(1, 5, 4)\n error = torch.clamp(pred_mask - gt_mask, -1.0, 1.0).detach().cpu().numpy()\n cmap = plt.imshow(error)\n plt.title(\"(Pr - GT) Mask Error\"), plt.axis(\"off\")\n plt.colorbar(cmap, shrink=0.8)\n \n plt.subplot(1, 5, 5)\n depth = pred_depth.detach().cpu().numpy()\n cmap = plt.imshow(depth)\n plt.title(\"Pred Depth\"), plt.axis(\"off\")\n plt.colorbar(cmap, shrink=0.8)\n\n plt.tight_layout()\n fig.canvas.draw()\n fig_np = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n 
fig_np = fig_np.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n if save_path is not None:\n plt.savefig(save_path)\n plt.close(fig)\n return fig_np" }, { "identifier": "sample_camera", "path": "lib_guidance/camera_sampling.py", "snippet": "def sample_camera(\n global_step=1,\n n_view=4,\n real_batch_size=1,\n random_azimuth_range=[-180.0, 180.0],\n random_elevation_range=[0.0, 30.0],\n eval_elevation_deg=15,\n camera_distance_range=[0.8, 1.0], # relative\n fovy_range=[15, 60],\n zoom_range=[1.0, 1.0],\n progressive_until=0,\n relative_radius=True,\n):\n # camera_perturb = 0.0\n # center_perturb = 0.0\n # up_perturb: 0.0\n\n # ! from uncond.py\n # ThreeStudio has progressive increase of camera poses, from eval to random\n r = min(1.0, global_step / (progressive_until + 1))\n elevation_range = [\n (1 - r) * eval_elevation_deg + r * random_elevation_range[0],\n (1 - r) * eval_elevation_deg + r * random_elevation_range[1],\n ]\n azimuth_range = [\n (1 - r) * 0.0 + r * random_azimuth_range[0],\n (1 - r) * 0.0 + r * random_azimuth_range[1],\n ]\n\n # sample elevation angles\n if random.random() < 0.5:\n # sample elevation angles uniformly with a probability 0.5 (biased towards poles)\n elevation_deg = (\n torch.rand(real_batch_size) * (elevation_range[1] - elevation_range[0])\n + elevation_range[0]\n ).repeat_interleave(n_view, dim=0)\n elevation = elevation_deg * math.pi / 180\n else:\n # otherwise sample uniformly on sphere\n elevation_range_percent = [\n (elevation_range[0] + 90.0) / 180.0,\n (elevation_range[1] + 90.0) / 180.0,\n ]\n # inverse transform sampling\n elevation = torch.asin(\n 2\n * (\n torch.rand(real_batch_size)\n * (elevation_range_percent[1] - elevation_range_percent[0])\n + elevation_range_percent[0]\n )\n - 1.0\n ).repeat_interleave(n_view, dim=0)\n elevation_deg = elevation / math.pi * 180.0\n\n # sample azimuth angles from a uniform distribution bounded by azimuth_range\n # ensures sampled azimuth angles in a batch cover the whole range\n azimuth_deg = (\n torch.rand(real_batch_size).reshape(-1, 1) + torch.arange(n_view).reshape(1, -1)\n ).reshape(-1) / n_view * (azimuth_range[1] - azimuth_range[0]) + azimuth_range[0]\n azimuth = azimuth_deg * math.pi / 180\n\n ######## Different from original ########\n # sample fovs from a uniform distribution bounded by fov_range\n fovy_deg = (\n torch.rand(real_batch_size) * (fovy_range[1] - fovy_range[0]) + fovy_range[0]\n ).repeat_interleave(n_view, dim=0)\n fovy = fovy_deg * math.pi / 180\n\n # sample distances from a uniform distribution bounded by distance_range\n camera_distances = (\n torch.rand(real_batch_size) * (camera_distance_range[1] - camera_distance_range[0])\n + camera_distance_range[0]\n ).repeat_interleave(n_view, dim=0)\n if relative_radius:\n scale = 1 / torch.tan(0.5 * fovy)\n camera_distances = scale * camera_distances\n\n # zoom in by decreasing fov after camera distance is fixed\n zoom = (\n torch.rand(real_batch_size) * (zoom_range[1] - zoom_range[0]) + zoom_range[0]\n ).repeat_interleave(n_view, dim=0)\n fovy = fovy * zoom\n fovy_deg = fovy_deg * zoom\n ###########################################\n\n # convert spherical coordinates to cartesian coordinates\n # right hand coordinate system, x back, y right, z up\n # elevation in (-90, 90), azimuth from +x to +y in (-180, 180)\n camera_positions = torch.stack(\n [\n camera_distances * torch.cos(elevation) * torch.cos(azimuth),\n camera_distances * torch.cos(elevation) * torch.sin(azimuth),\n camera_distances * torch.sin(elevation),\n ],\n dim=-1,\n )\n\n 
azimuth, elevation\n # build opencv camera\n z = -torch.stack(\n [\n torch.cos(elevation) * torch.cos(azimuth),\n torch.cos(elevation) * torch.sin(azimuth),\n torch.sin(elevation),\n ],\n -1,\n ) # nview, 3\n # up is 0,0,1\n x = torch.cross(z, torch.tensor([0.0, 0.0, 1.0], device=z.device).repeat(n_view, 1), -1)\n y = torch.cross(z, x, -1)\n\n R_wc = torch.stack([x, y, z], dim=2) # nview, 3, 3, col is basis\n t_wc = camera_positions\n\n T_wc = torch.eye(4, device=R_wc.device).repeat(n_view, 1, 1)\n T_wc[:, :3, :3] = R_wc\n T_wc[:, :3, 3] = t_wc\n\n return T_wc, fovy_deg # B,4,4, B" }, { "identifier": "fov2K", "path": "lib_guidance/camera_sampling.py", "snippet": "def fov2K(fov=90, H=256, W=256):\n if isinstance(fov, torch.Tensor):\n f = H / (2 * torch.tan(fov / 2 * np.pi / 180))\n K = torch.eye(3).repeat(fov.shape[0], 1, 1).to(fov)\n K[:, 0, 0], K[:, 0, 2] = f, W / 2.0\n K[:, 1, 1], K[:, 1, 2] = f, H / 2.0\n return K.clone()\n else:\n f = H / (2 * np.tan(fov / 2 * np.pi / 180))\n K = np.eye(3)\n K[0, 0], K[0, 2] = f, W / 2.0\n K[1, 1], K[1, 2] = f, H / 2.0\n return K.copy()" }, { "identifier": "opencv2blender", "path": "lib_guidance/camera_sampling.py", "snippet": "def opencv2blender(T):\n ret = T.clone()\n # y,z are negative\n ret[:, :, 1] *= -1\n ret[:, :, 2] *= -1\n return ret" }, { "identifier": "viz_spinning", "path": "viz_utils.py", "snippet": "@torch.no_grad()\ndef viz_spinning(\n model,\n pose,\n trans,\n H,\n W,\n K,\n save_path,\n time_index=None,\n n_spinning=10,\n model_mask=None,\n active_sph_order=0,\n bg_color=[1.0, 1.0, 1.0],\n):\n device = pose.device\n mu, fr, s, o, sph, additional_ret = model(\n pose, trans, {\"t\": time_index}, active_sph_order=active_sph_order\n )\n if model_mask is not None:\n assert len(model_mask) == mu.shape[1]\n mu = mu[:, model_mask.bool()]\n fr = fr[:, model_mask.bool()]\n s = s[:, model_mask.bool()]\n o = o[:, model_mask.bool()]\n sph = sph[:, model_mask.bool()]\n\n viz_frames = []\n for vid in range(n_spinning):\n spin_R = (\n torch.from_numpy(euler2mat(0, 2 * np.pi * vid / n_spinning, 0, \"sxyz\"))\n .to(device)\n .float()\n )\n spin_t = mu.mean(1)[0]\n spin_t = (torch.eye(3).to(device) - spin_R) @ spin_t[:, None]\n spin_T = torch.eye(4).to(device)\n spin_T[:3, :3] = spin_R\n spin_T[:3, 3] = spin_t.squeeze(-1)\n viz_mu, viz_fr = transform_mu_frame(mu, fr, spin_T[None])\n\n render_pkg = render_cam_pcl(\n viz_mu[0],\n viz_fr[0],\n s[0],\n o[0],\n sph[0],\n H,\n W,\n K,\n False,\n active_sph_order,\n bg_color=bg_color,\n )\n viz_frame = (\n torch.clamp(render_pkg[\"rgb\"], 0.0, 1.0)\n .permute(1, 2, 0)\n .detach()\n .cpu()\n .numpy()\n )\n viz_frame = (viz_frame * 255).astype(np.uint8)\n viz_frames.append(viz_frame)\n imageio.mimsave(save_path, viz_frames)\n return" }, { "identifier": "viz_human_all", "path": "viz_utils.py", "snippet": "@torch.no_grad()\ndef viz_human_all(\n solver,\n data_provider: RealDataOptimizablePoseProviderPose = None,\n ckpt_dir=None,\n training_skip=1,\n n_spinning=40,\n novel_pose_dir=\"novel_poses\",\n novel_skip=2,\n model=None,\n model_mask=None,\n viz_name=\"\",\n export_mesh_flag=False, # remove this from release version\n):\n if model is None:\n model = solver.load_saved_model(ckpt_dir)\n model.eval()\n\n viz_dir = osp.join(solver.log_dir, f\"{viz_name}_human_viz\")\n os.makedirs(viz_dir, exist_ok=True)\n\n active_sph_order = int(model.max_sph_order)\n\n if data_provider is not None:\n # if ckpt_dir is None:\n # ckpt_dir = solver.log_dir\n # pose_path = osp.join(ckpt_dir, \"pose.pth\")\n pose_base_list = 
data_provider.pose_base_list\n pose_rest_list = data_provider.pose_rest_list\n global_trans_list = data_provider.global_trans_list\n pose_list = torch.cat([pose_base_list, pose_rest_list], 1)\n pose_list, global_trans_list = pose_list.to(\n solver.device\n ), global_trans_list.to(solver.device)\n rgb_list = data_provider.rgb_list\n mask_list = data_provider.mask_list\n K_list = data_provider.K_list\n H, W = rgb_list.shape[1:3]\n else:\n H, W = 512, 512\n K_list = [torch.from_numpy(fov2K(45, H, W)).float().to(solver.device)]\n global_trans_list = torch.zeros(1, 3).to(solver.device)\n global_trans_list[0, -1] = 3.0\n\n # viz training\n if data_provider is not None:\n print(\"Viz training...\")\n viz_frames = []\n for t in range(len(pose_list)):\n if t % training_skip != 0:\n continue\n pose = pose_list[t][None]\n K = K_list[t]\n trans = global_trans_list[t][None]\n time_index = torch.Tensor([t]).long().to(solver.device)\n mu, fr, s, o, sph, _ = model(\n pose,\n trans,\n {\"t\": time_index}, # use time_index from training set\n active_sph_order=active_sph_order,\n )\n if model_mask is not None:\n assert len(model_mask) == mu.shape[1]\n mu = mu[:, model_mask.bool()]\n fr = fr[:, model_mask.bool()]\n s = s[:, model_mask.bool()]\n o = o[:, model_mask.bool()]\n sph = sph[:, model_mask.bool()]\n render_pkg = render_cam_pcl(\n mu[0],\n fr[0],\n s[0],\n o[0],\n sph[0],\n H,\n W,\n K,\n False,\n active_sph_order,\n bg_color=getattr(solver, \"DEFAULT_BG\", [1.0, 1.0, 1.0]),\n )\n viz_frame = viz_render(rgb_list[t], mask_list[t], render_pkg)\n viz_frames.append(viz_frame)\n imageio.mimsave(f\"{viz_dir}/training.gif\", viz_frames)\n\n # viz static spinning\n print(\"Viz spinning...\")\n can_pose = model.template.canonical_pose.detach()\n viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, \"sxyz\"))\n viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float()\n can_pose[0] = viz_base_R_opencv.to(can_pose.device)\n can_pose = matrix_to_axis_angle(can_pose)[None]\n dapose = torch.from_numpy(np.zeros((1, 24, 3))).float().to(solver.device)\n dapose[:, 1, -1] = np.pi / 4\n dapose[:, 2, -1] = -np.pi / 4\n dapose[:, 0] = matrix_to_axis_angle(solver.viz_base_R[None])[0]\n tpose = torch.from_numpy(np.zeros((1, 24, 3))).float().to(solver.device)\n tpose[:, 0] = matrix_to_axis_angle(solver.viz_base_R[None])[0]\n to_viz = {\"cano-pose\": can_pose, \"t-pose\": tpose, \"da-pose\": dapose}\n if data_provider is not None:\n to_viz[\"first-frame\"] = pose_list[0][None]\n\n for name, pose in to_viz.items():\n print(f\"Viz novel {name}...\")\n # if export_mesh_flag:\n # from lib_marchingcubes.gaumesh_utils import MeshExtractor\n # # also extract a mesh\n # mesh = solver.extract_mesh(model, pose)\n # mesh.export(f\"{viz_dir}/mc_{name}.obj\", \"obj\")\n\n # # for making figures, the rotation is in another way\n # viz_spinning_self_rotate(\n # model,\n # solver.viz_base_R.detach(),\n # pose,\n # global_trans_list[0][None],\n # H,\n # W,\n # K_list[0],\n # f\"{viz_dir}/{name}_selfrotate.gif\",\n # time_index=None, # if set to None and use t, the add_bone will hand this\n # n_spinning=n_spinning,\n # active_sph_order=model.max_sph_order,\n # )\n viz_spinning(\n model,\n pose,\n global_trans_list[0][None],\n H,\n W,\n K_list[0],\n f\"{viz_dir}/{name}.gif\",\n time_index=None, # if set to None and use t, the add_bone will hand this\n n_spinning=n_spinning,\n active_sph_order=model.max_sph_order,\n bg_color=getattr(solver, \"DEFAULT_BG\", [1.0, 1.0, 1.0]),\n )\n\n # viz novel pose dynamic spinning\n print(\"Viz novel 
seq...\")\n novel_pose_names = [\n f[:-4] for f in os.listdir(novel_pose_dir) if f.endswith(\".npy\")\n ]\n seq_viz_todo = {}\n for name in novel_pose_names:\n novel_pose_fn = osp.join(novel_pose_dir, f\"{name}.npy\")\n novel_poses = np.load(novel_pose_fn, allow_pickle=True)\n novel_poses = novel_poses[::novel_skip]\n N_frames = len(novel_poses)\n novel_poses = torch.from_numpy(novel_poses).float().to(solver.device)\n novel_poses = novel_poses.reshape(N_frames, 24, 3)\n\n seq_viz_todo[name] = (novel_poses, N_frames)\n if data_provider is not None:\n seq_viz_todo[\"training\"] = [pose_list, len(pose_list)]\n\n for name, (novel_poses, N_frames) in seq_viz_todo.items():\n base_R = solver.viz_base_R.detach().cpu().numpy()\n viz_frames = []\n K = K_list[0]\n for vid in range(N_frames):\n pose = novel_poses[vid][None]\n # pose = novel_poses[0][None] # debug\n rotation = euler2mat(2 * np.pi * vid / N_frames, 0.0, 0.0, \"syxz\")\n rotation = torch.from_numpy(rotation @ base_R).float().to(solver.device)\n pose[:, 0] = matrix_to_axis_angle(rotation[None])[0]\n trans = global_trans_list[0][None]\n mu, fr, s, o, sph, _ = model(\n pose,\n trans,\n # not pass in {}, so t is auto none\n additional_dict={},\n active_sph_order=active_sph_order,\n )\n if model_mask is not None:\n assert len(model_mask) == mu.shape[1]\n mu = mu[:, model_mask.bool()]\n fr = fr[:, model_mask.bool()]\n s = s[:, model_mask.bool()]\n o = o[:, model_mask.bool()]\n sph = sph[:, model_mask.bool()]\n render_pkg = render_cam_pcl(\n mu[0],\n fr[0],\n s[0],\n o[0],\n sph[0],\n H,\n W,\n K,\n False,\n active_sph_order,\n bg_color=getattr(solver, \"DEFAULT_BG\", [1.0, 1.0, 1.0]),\n # bg_color=[1.0, 1.0, 1.0], # ! use white bg for viz\n )\n viz_frame = (\n torch.clamp(render_pkg[\"rgb\"], 0.0, 1.0)\n .permute(1, 2, 0)\n .detach()\n .cpu()\n .numpy()\n )\n viz_frame = (viz_frame * 255).astype(np.uint8)\n viz_frames.append(viz_frame)\n imageio.mimsave(f\"{viz_dir}/novel_pose_{name}.gif\", viz_frames)\n return" }, { "identifier": "viz_dog_all", "path": "viz_utils.py", "snippet": "@torch.no_grad()\ndef viz_dog_all(solver, data_provider, model=None, ckpt_dir=None, viz_name=\"\"):\n if model is None:\n model = solver.load_saved_model(ckpt_dir)\n model.eval()\n viz_dir = osp.join(solver.log_dir, f\"{viz_name}_dog_viz\")\n os.makedirs(viz_dir, exist_ok=True)\n\n viz_pose = (\n torch.cat([data_provider.pose_base_list, data_provider.pose_rest_list], 1)\n .detach()\n .clone()\n )\n viz_pose = torch.mean(viz_pose, dim=0, keepdim=True) # use mean pose for viz \n limb = viz_pose[:, -7:] \n pose = viz_pose[:, :-7].reshape(-1, 35, 3)\n pose[:, :-3] = 0 # exclude ears and mouth poses\n\n viz_pose = torch.concat([pose.reshape(1, -1), limb], dim=1)\n viz_trans = torch.tensor([[0.0, -0.3, 25.0]], device=\"cuda:0\")\n\n viz_dog_spin(\n model.to(\"cuda\"),\n viz_pose,\n viz_trans,\n data_provider.H,\n data_provider.W,\n data_provider.K_list[0],\n save_path=osp.join(viz_dir, \"spin.gif\"),\n n_spinning=42,\n )\n\n viz_dog_spin2(\n model.to(\"cuda\"),\n viz_pose,\n viz_trans,\n data_provider.H,\n data_provider.W,\n data_provider.K_list[0],\n save_path=osp.join(viz_dir, \"spin2.gif\"),\n n_spinning=20,\n )\n\n ######################################################################\n # Dataset pose seq\n viz_pose = (\n torch.cat([data_provider.pose_base_list, data_provider.pose_rest_list], 1)\n .detach()\n .clone()\n )\n viz_pose = torch.mean(viz_pose, dim=0, keepdim=True)\n pose = viz_pose[:, :-7].reshape(-1, 35, 3)\n limb = viz_pose[:, -7:]\n\n # Animation\n aroot 
= osp.join(osp.dirname(__file__), \"novel_poses/husky\")\n window = list(range(350, 440)) # Run\n trans = torch.tensor([[0.3, -0.3, 25.0]], device=\"cuda:0\")\n files = [f\"{aroot}/{i:04d}.npz\" for i in window]\n pose_list = [dict(np.load(file))[\"pred_pose\"] for file in files]\n pose_list = np.concatenate(pose_list)\n animation = matrix_to_axis_angle(torch.from_numpy(pose_list)).to(solver.device)\n animation[:, [32, 33, 34]] = pose[:, [32, 33, 34]] \n\n viz_dog_animation(\n model.to(\"cuda\"),\n animation,\n limb,\n trans,\n data_provider.H,\n data_provider.W,\n data_provider.K_list[0],\n save_path=osp.join(viz_dir, \"animation.gif\"),\n fps=12,\n )\n return" }, { "identifier": "ssim", "path": "utils/ssim.py", "snippet": "def ssim(img1, img2, window_size=11, size_average=True):\n channel = img1.size(-3)\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n return _ssim(img1, img2, window, window_size, channel, size_average)" }, { "identifier": "test", "path": "test_utils/test_func.py", "snippet": "def test(\n solver,\n seq_name: str,\n tto_flag=True,\n tto_step=300,\n tto_decay=60,\n tto_decay_factor=0.5,\n pose_base_lr=3e-3,\n pose_rest_lr=3e-3,\n trans_lr=3e-3,\n dataset_mode=\"people_snapshot\",\n training_optimized_seq=None,\n):\n device = solver.device\n model = solver.load_saved_model()\n\n assert dataset_mode in [\n \"people_snapshot\",\n \"zju\",\n \"instant_avatar_wild\",\n \"dog_demo\",\n ], f\"Unknown dataset mode {dataset_mode}\"\n\n if dataset_mode == \"people_snapshot\":\n eval_mode = \"avatar\"\n bg = [1.0, 1.0, 1.0]\n test_dataset = InstantAvatarDataset(\n noisy_flag=False,\n data_root=\"./data/people_snapshot/\",\n video_name=seq_name,\n split=\"test\",\n image_zoom_ratio=0.5,\n )\n elif dataset_mode == \"zju\":\n eval_mode = \"nvr\"\n test_dataset = ZJUDataset(\n data_root=\"./data/zju_mocap\",\n video_name=seq_name,\n split=\"test\",\n image_zoom_ratio=0.5,\n )\n bg = [0.0, 0.0, 0.0] # zju use black background\n elif dataset_mode == \"instant_avatar_wild\":\n eval_mode = \"avatar\"\n test_dataset = InstantAvatarWildDataset(\n data_root=\"./data/insav_wild\",\n video_name=seq_name,\n split=\"test\",\n image_zoom_ratio=1.0,\n # ! 
warning, here follow the `ubc_hard.yaml` in InstAVT setting, use slicing\n start_end_skip=[2, 1000000000, 4],\n )\n bg = [1.0, 1.0, 1.0]\n\n test_len = len(test_dataset)\n assert (training_optimized_seq.total_t == test_len) or (\n training_optimized_seq.total_t == 1 + test_len\n ), \"Now UBC can only support the same length of training and testing or + 1\"\n test_dataset.smpl_params[\"body_pose\"] = (\n training_optimized_seq.pose_rest_list.reshape(-1, 69)[:test_len]\n .detach()\n .cpu()\n .numpy()\n )\n test_dataset.smpl_params[\"global_orient\"] = (\n training_optimized_seq.pose_base_list.reshape(-1, 3)[:test_len]\n .detach()\n .cpu()\n .numpy()\n )\n test_dataset.smpl_params[\"transl\"] = (\n training_optimized_seq.global_trans_list.reshape(-1, 3)[:test_len]\n .detach()\n .cpu()\n .numpy()\n )\n elif dataset_mode == \"dog_demo\":\n eval_mode = \"avatar_brightness\"\n bg = [1.0, 1.0, 1.0]\n test_dataset = DogDemoDataset(\n data_root=\"./data/dog_data_official/\", video_name=seq_name, test=True\n )\n else:\n raise NotImplementedError()\n\n evaluator = get_evaluator(eval_mode, device)\n\n _save_eval_maps(\n solver.log_dir,\n \"test\",\n model,\n solver,\n test_dataset,\n dataset_mode=dataset_mode,\n device=device,\n bg=bg,\n tto_flag=tto_flag,\n tto_step=tto_step,\n tto_decay=tto_decay,\n tto_decay_factor=tto_decay_factor,\n tto_evaluator=evaluator,\n pose_base_lr=pose_base_lr,\n pose_rest_lr=pose_rest_lr,\n trans_lr=trans_lr,\n )\n\n if tto_flag:\n _evaluate_dir(evaluator, solver.log_dir, \"test_tto\")\n else:\n _evaluate_dir(evaluator, solver.log_dir, \"test\")\n\n return" } ]
from matplotlib import pyplot as plt
from pytorch3d.transforms import matrix_to_axis_angle
from tqdm import tqdm
from transforms3d.euler import euler2mat
from omegaconf import OmegaConf
from lib_data.get_data import prepare_real_seq
from lib_data.data_provider import DatabasePoseProvider
from lib_gart.templates import get_template
from lib_gart.model import GaussianTemplateModel, AdditionalBones
from lib_gart.optim_utils import *
from lib_render.gauspl_renderer import render_cam_pcl
from lib_gart.model_utils import transform_mu_frame
from utils.misc import *
from utils.viz import viz_render
from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle
from pytorch3d.ops import knn_points
from lib_guidance.camera_sampling import sample_camera, fov2K, opencv2blender
from viz_utils import viz_spinning, viz_human_all, viz_dog_all
from utils.ssim import ssim
from datetime import datetime
from test_utils import test
from lib_guidance.mvdream.mvdream_guidance import MVDream
from utils.lpips import LPIPS
import imageio
import torch
import numpy as np
import os, os.path as osp, shutil, sys
import time
import logging
import argparse
20,848
self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED)
# from lib_marchingcubes.gaumesh_utils import MeshExtractor try: # from lib_guidance.sd_utils import StableDiffusion except: logging.warning("No guidance module") class TGFitter: def __init__( self, log_dir, profile_fn, mode, template_model_path="data/smpl_model/SMPL_NEUTRAL.pkl", device=torch.device("cuda:0"), **kwargs, ) -> None: self.log_dir = log_dir os.makedirs(self.log_dir, exist_ok=True) self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu")) return provider return provider def prepare_real_seq( self, seq_name, dataset_mode, split, ins_avt_wild_start_end_skip=None, image_zoom_ratio=0.5, data_stay_gpu_flag=True, ): provider, dataset = prepare_real_seq( seq_name=seq_name, dataset_mode=dataset_mode, split=split, ins_avt_wild_start_end_skip=ins_avt_wild_start_end_skip, image_zoom_ratio=getattr( self, "IMAGE_ZOOM_RATIO", image_zoom_ratio ), # ! this overwrite the func arg balance=getattr(self, "VIEW_BALANCE_FLAG", False), ) provider.to(self.device) if getattr(self, "DATA_STAY_GPU_FLAG", data_stay_gpu_flag): provider.move_images_to_device(self.device) provider.viz_selection_prob( osp.join(self.log_dir, f"split_{split}_view_prob.png") ) return provider, dataset def load_saved_model(self, ckpt_path=None): if ckpt_path is None: ckpt_path = osp.join(self.log_dir, "model.pth") ret = self._get_model_optimizer(betas=None) model = ret[0] model.load(torch.load(ckpt_path)) model.to(self.device) model.eval() logging.info("After loading:") model.summary() return model def _get_model_optimizer(self, betas, add_bones_total_t=0): seed_everything(self.SEED)
template = get_template(
2
2023-11-27 17:30:04+00:00
24k
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras" }, { "identifier": "qvec2rotmat", "path": "scene/colmap_loader.py", "snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])" }, { "identifier": "read_extrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n 
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras" }, { "identifier": "read_points3D_binary", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors" }, { "identifier": "read_points3D_text", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors" }, { "identifier": "getWorld2View2", "path": 
"utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)" }, { "identifier": "focal2fov", "path": "utils/graphics_utils.py", "snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))" }, { "identifier": "fov2focal", "path": "utils/graphics_utils.py", "snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))" }, { "identifier": "SH2RGB", "path": "utils/sh_utils.py", "snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5" }, { "identifier": "BasicPointCloud", "path": "scene/gaussian_model.py", "snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation, transform):\n def __init__(self, sh_degree : int, smpl_type : str, motion_offset_flag : bool, actor_gender: str):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1, transform=None):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def kl_densify_and_clone(self, grads, grad_threshold, scene_extent, kl_threshold=0.4):\n def kl_densify_and_split(self, grads, grad_threshold, scene_extent, kl_threshold=0.4, N=2):\n def kl_merge(self, grads, grad_threshold, scene_extent, kl_threshold=0.1):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size, kl_threshold=0.4, t_vertices=None, iter=None):\n def kl_div(self, mu_0, rotation_0_q, scaling_0_diag, mu_1, rotation_1_q, scaling_1_diag):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n def coarse_deform_c2source(self, query_pts, params, t_params, t_vertices, lbs_weights=None, correct_Rs=None, return_transl=False):\ndef read_pickle(pkl_path):\ndef SMPL_to_tensor(params, device):\ndef batch_rodrigues_torch(poses):\ndef get_rigid_transformation_torch(rot_mats, joints, parents):\ndef get_transform_params_torch(smpl, params, rot_mats=None, correct_Rs=None):\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)\n L_0 = rotation_0 @ scaling_0\n A = torch.matmul(bweights, A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n A = torch.matmul(bweights, self.s_A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1)\n K = K.reshape([batch_size, 3, 3])\n A = 
get_rigid_transformation_torch(rot_mats, joints, parents)\n R = params['R'] \n K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \\\n .view((batch_size, 3, 3))" }, { "identifier": "SMPL", "path": "smpl/smpl_numpy.py", "snippet": "class SMPL():\n def __init__(self, sex, model_dir):\n super(SMPL, self).__init__()\n\n model_paths = {\n 'male': os.path.join(model_dir, MALE_PATH),\n 'female': os.path.join(model_dir, FEMALE_PATH),\n # 'neutral': os.path.join(model_dir, NEUTRAL_PATH)\n 'neutral': os.path.join('assets/SMPL_NEUTRAL.pkl')\n }\n\n with open(model_paths[sex], 'rb') as f:\n smpl_model = pickle.load(f, encoding='latin1')\n self.J_regressor = np.array(smpl_model['J_regressor'].todense()) # (24, 6890)\n self.weights = smpl_model['weights'] # (6890, 24)\n self.posedirs = smpl_model['posedirs'] # (6890, 3, 207)\n self.v_template = smpl_model['v_template'] # (6890, 3)\n self.shapedirs = np.array(smpl_model['shapedirs']) # (6890, 3, 10)\n self.faces = smpl_model['f'].astype('int32') # (13776, 3)\n self.kintree_table = smpl_model['kintree_table'].astype('int64') # (2, 24)\n\n id_to_col = {self.kintree_table[1, i].item(): i for i in range(self.kintree_table.shape[1])}\n self.parent = np.array([id_to_col[self.kintree_table[0, it]] for it in range(1, self.kintree_table.shape[1])])\n\n self.pose_shape = [24, 3]\n self.beta_shape = [10]\n self.pose = np.zeros(self.pose_shape)\n self.beta = np.zeros(self.beta_shape)\n\n self.verts = None\n self.J = None\n self.R = None\n\n def __call__(self, pose, beta):\n\n v_template = self.v_template # (6890, 3)\n shapedirs = self.shapedirs.reshape(-1,10) # (6890*3, 10)\n beta = beta[:, None] # (10, 1)\n\n v_shaped = shapedirs.dot(beta).reshape(6890, 3) + v_template # (6890, 3)\n J = self.J_regressor.dot(v_shaped) # (24, 3)\n\n # input is a rotation matrix: (24,3,3)\n if pose.shape == (24, 3, 3):\n R = pose\n # input is a rotation axis-angle vector: (1, 72), (72, 1) or (72, )\n elif pose.shape == (1, 72) or pose.shape == (72, 1) or pose.shape == (72,):\n pose_vectors = pose.reshape(-1, 3) # (24, 3)\n R = np.array([rodrigues(pose_vectors[p_idx])[0] \n for p_idx in range(pose_vectors.shape[0])\n ], \n dtype='float32') # (24, 3, 3)\n else:\n raise ValueError(\"Unsupported Pose Inputs - the Pose Shape is {}\".format(pose.shape))\n\n Is = np.eye(3, dtype='float32')[None, :] # (1, 3, 3)\n lrotmin = (R[1:,:] - Is).reshape(-1, 1) # (23x3x3, 1)\n posedirs = self.posedirs.reshape(-1,207) # (6890x3, 207)\n v_posed = v_shaped + posedirs.dot(lrotmin).reshape(6890, 3) # (6890, 3)\n\n J_ = J.copy()\n J_[1:, :] = J[1:, :] - J[self.parent, :] # (24, 3)\n G_ = np.concatenate([R, J_[:, :, None]], axis=-1) # (24, 3, 4)\n pad_rows = np.array([[0, 0, 0, 1]], dtype='float32')\n pad_rows = np.repeat(pad_rows, 24, axis=0).reshape(-1, 1, 4)\n G_ = np.concatenate([G_, pad_rows], axis=1) # (24, 4, 4)\n\n G = [G_[0].copy()]\n for i in range(1, 24):\n G.append(G[self.parent[i-1]].dot(G_[i, :, :]))\n G = np.stack(G, axis=0) # (24, 4, 4)\n\n joints = G[:, :3, 3]\n rest_joints = np.concatenate([J, np.zeros((24, 1))], axis=-1)[:, :, None] # (24, 4, 1)\n zeros = np.zeros((24, 4, 3), dtype='float32') # (24, 4, 3)\n rest_joints_mtx = np.concatenate([zeros, rest_joints], axis=-1) # (24, 4, 4) \n # print(\"G1: \", G[0], \"rest_joints_mtx1: \", rest_joints_mtx[0])\n posed_joints_mtx = np.matmul(G, rest_joints_mtx)\n # print(\"rest_joints_mtx2: \", posed_joints_mtx[0])\n G = G - posed_joints_mtx\n # print(G[0]) \n 
rest_shape_h = np.concatenate([v_posed, np.ones(v_posed.shape[0])[:, None]], axis=-1) #(6890, 4)\n T = self.weights.dot(G.reshape(24, -1)).reshape(6890, 4, 4)\n v = np.matmul(T, rest_shape_h[:, :, None])[:, :3, 0]\n \n return v, joints" }, { "identifier": "SMPLX", "path": "smplx/body_models.py", "snippet": "class SMPLX(SMPLH):\n '''\n SMPL-X (SMPL eXpressive) is a unified body model, with shape parameters\n trained jointly for the face, hands and body.\n SMPL-X uses standard vertex based linear blend skinning with learned\n corrective blend shapes, has N=10475 vertices and K=54 joints,\n which includes joints for the neck, jaw, eyeballs and fingers.\n '''\n\n NUM_BODY_JOINTS = SMPLH.NUM_BODY_JOINTS\n NUM_HAND_JOINTS = 15\n NUM_FACE_JOINTS = 3\n NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS + NUM_FACE_JOINTS\n EXPRESSION_SPACE_DIM = 100\n NECK_IDX = 12\n\n def __init__(\n self, model_path: str,\n kid_template_path: str = '',\n num_expression_coeffs: int = 10,\n create_expression: bool = True,\n expression: Optional[Tensor] = None,\n create_jaw_pose: bool = True,\n jaw_pose: Optional[Tensor] = None,\n create_leye_pose: bool = True,\n leye_pose: Optional[Tensor] = None,\n create_reye_pose=True,\n reye_pose: Optional[Tensor] = None,\n use_face_contour: bool = False,\n batch_size: int = 1,\n gender: str = 'neutral',\n age: str = 'adult',\n dtype=torch.float32,\n ext: str = 'npz',\n **kwargs\n ) -> None:\n ''' SMPLX model constructor\n\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n num_expression_coeffs: int, optional\n Number of expression components to use\n (default = 10).\n create_expression: bool, optional\n Flag for creating a member variable for the expression space\n (default = True).\n expression: torch.tensor, optional, Bx10\n The default value for the expression member variable.\n (default = None)\n create_jaw_pose: bool, optional\n Flag for creating a member variable for the jaw pose.\n (default = False)\n jaw_pose: torch.tensor, optional, Bx3\n The default value for the jaw pose variable.\n (default = None)\n create_leye_pose: bool, optional\n Flag for creating a member variable for the left eye pose.\n (default = False)\n leye_pose: torch.tensor, optional, Bx10\n The default value for the left eye pose variable.\n (default = None)\n create_reye_pose: bool, optional\n Flag for creating a member variable for the right eye pose.\n (default = False)\n reye_pose: torch.tensor, optional, Bx10\n The default value for the right eye pose variable.\n (default = None)\n use_face_contour: bool, optional\n Whether to compute the keypoints that form the facial contour\n batch_size: int, optional\n The batch size used for creating the member variables\n gender: str, optional\n Which gender to load\n dtype: torch.dtype\n The data type for the created variables\n '''\n\n # Load the model\n if osp.isdir(model_path):\n model_fn = 'SMPLX_{}.{ext}'.format(gender.upper(), ext=ext)\n smplx_path = os.path.join(model_path, model_fn)\n else:\n smplx_path = model_path\n assert osp.exists(smplx_path), 'Path {} does not exist!'.format(\n smplx_path)\n\n if ext == 'pkl':\n with open(smplx_path, 'rb') as smplx_file:\n model_data = pickle.load(smplx_file, encoding='latin1')\n elif ext == 'npz':\n model_data = np.load(smplx_path, allow_pickle=True)\n else:\n raise ValueError('Unknown extension: {}'.format(ext))\n\n data_struct = Struct(**model_data)\n\n super(SMPLX, self).__init__(\n model_path=model_path,\n 
kid_template_path=kid_template_path,\n data_struct=data_struct,\n dtype=dtype,\n batch_size=batch_size,\n vertex_ids=VERTEX_IDS['smplx'],\n gender=gender, age=age, ext=ext,\n **kwargs)\n\n lmk_faces_idx = data_struct.lmk_faces_idx\n self.register_buffer('lmk_faces_idx',\n torch.tensor(lmk_faces_idx, dtype=torch.long))\n lmk_bary_coords = data_struct.lmk_bary_coords\n self.register_buffer('lmk_bary_coords',\n torch.tensor(lmk_bary_coords, dtype=dtype))\n\n self.use_face_contour = use_face_contour\n if self.use_face_contour:\n dynamic_lmk_faces_idx = data_struct.dynamic_lmk_faces_idx\n dynamic_lmk_faces_idx = torch.tensor(\n dynamic_lmk_faces_idx,\n dtype=torch.long)\n self.register_buffer('dynamic_lmk_faces_idx',\n dynamic_lmk_faces_idx)\n\n dynamic_lmk_bary_coords = data_struct.dynamic_lmk_bary_coords\n dynamic_lmk_bary_coords = torch.tensor(\n dynamic_lmk_bary_coords, dtype=dtype)\n self.register_buffer('dynamic_lmk_bary_coords',\n dynamic_lmk_bary_coords)\n\n neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)\n self.register_buffer(\n 'neck_kin_chain',\n torch.tensor(neck_kin_chain, dtype=torch.long))\n\n if create_jaw_pose:\n if jaw_pose is None:\n default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype)\n jaw_pose_param = nn.Parameter(default_jaw_pose,\n requires_grad=True)\n self.register_parameter('jaw_pose', jaw_pose_param)\n\n if create_leye_pose:\n if leye_pose is None:\n default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_leye_pose = torch.tensor(leye_pose, dtype=dtype)\n leye_pose_param = nn.Parameter(default_leye_pose,\n requires_grad=True)\n self.register_parameter('leye_pose', leye_pose_param)\n\n if create_reye_pose:\n if reye_pose is None:\n default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_reye_pose = torch.tensor(reye_pose, dtype=dtype)\n reye_pose_param = nn.Parameter(default_reye_pose,\n requires_grad=True)\n self.register_parameter('reye_pose', reye_pose_param)\n\n shapedirs = data_struct.shapedirs\n if len(shapedirs.shape) < 3:\n shapedirs = shapedirs[:, :, None]\n if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM +\n self.EXPRESSION_SPACE_DIM):\n print(f'WARNING: You are using a {self.name()} model, with only'\n ' 10 shape and 10 expression coefficients.')\n expr_start_idx = 10\n expr_end_idx = 20\n num_expression_coeffs = min(num_expression_coeffs, 10)\n else:\n expr_start_idx = self.SHAPE_SPACE_DIM\n expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs\n num_expression_coeffs = min(\n num_expression_coeffs, self.EXPRESSION_SPACE_DIM)\n\n self._num_expression_coeffs = num_expression_coeffs\n\n expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx]\n self.register_buffer(\n 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype))\n\n if create_expression:\n if expression is None:\n default_expression = torch.zeros(\n [batch_size, self.num_expression_coeffs], dtype=dtype)\n else:\n default_expression = torch.tensor(expression, dtype=dtype)\n expression_param = nn.Parameter(default_expression,\n requires_grad=True)\n self.register_parameter('expression', expression_param)\n\n def name(self) -> str:\n return 'SMPL-X'\n\n @property\n def num_expression_coeffs(self):\n return self._num_expression_coeffs\n\n def create_mean_pose(self, data_struct, flat_hand_mean=False):\n # Create the array for the mean pose. 
If flat_hand is false, then use\n # the mean that is given by the data, rather than the flat open hand\n global_orient_mean = torch.zeros([3], dtype=self.dtype)\n body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],\n dtype=self.dtype)\n jaw_pose_mean = torch.zeros([3], dtype=self.dtype)\n leye_pose_mean = torch.zeros([3], dtype=self.dtype)\n reye_pose_mean = torch.zeros([3], dtype=self.dtype)\n # pose_mean = np.concatenate([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], axis=0)\n pose_mean = torch.cat([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], 0)\n\n return pose_mean\n\n def extra_repr(self):\n msg = super(SMPLX, self).extra_repr()\n msg = [\n msg,\n f'Number of Expression Coefficients: {self.num_expression_coeffs}'\n ]\n return '\\n'.join(msg)\n\n def forward(\n self,\n betas: Optional[Tensor] = None,\n global_orient: Optional[Tensor] = None,\n body_pose: Optional[Tensor] = None,\n left_hand_pose: Optional[Tensor] = None,\n right_hand_pose: Optional[Tensor] = None,\n transl: Optional[Tensor] = None,\n expression: Optional[Tensor] = None,\n jaw_pose: Optional[Tensor] = None,\n leye_pose: Optional[Tensor] = None,\n reye_pose: Optional[Tensor] = None,\n return_verts: bool = True,\n return_full_pose: bool = False,\n pose2rot: bool = True,\n return_shaped: bool = True,\n **kwargs\n ) -> TensorOutput:\n '''\n Forward pass for the SMPLX model\n\n Parameters\n ----------\n global_orient: torch.tensor, optional, shape Bx3\n If given, ignore the member variable and use it as the global\n rotation of the body. Useful if someone wishes to predicts this\n with an external model. (default=None)\n betas: torch.tensor, optional, shape BxN_b\n If given, ignore the member variable `betas` and use it\n instead. For example, it can used if shape parameters\n `betas` are predicted from some external model.\n (default=None)\n expression: torch.tensor, optional, shape BxN_e\n If given, ignore the member variable `expression` and use it\n instead. For example, it can used if expression parameters\n `expression` are predicted from some external model.\n body_pose: torch.tensor, optional, shape Bx(J*3)\n If given, ignore the member variable `body_pose` and use it\n instead. For example, it can used if someone predicts the\n pose of the body joints are predicted from some external model.\n It should be a tensor that contains joint rotations in\n axis-angle format. (default=None)\n left_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `left_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n right_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `right_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n jaw_pose: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `jaw_pose` and\n use this instead. It should either joint rotations in\n axis-angle format.\n transl: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `transl` and use it\n instead. For example, it can used if the translation\n `transl` is predicted from some external model.\n (default=None)\n return_verts: bool, optional\n Return the vertices. 
(default=True)\n return_full_pose: bool, optional\n Returns the full axis-angle pose vector (default=False)\n\n Returns\n -------\n output: ModelOutput\n A named tuple of type `ModelOutput`\n '''\n\n # If no shape and pose parameters are passed along, then use the\n # ones from the module\n global_orient = (global_orient if global_orient is not None else\n self.global_orient)\n body_pose = body_pose if body_pose is not None else self.body_pose\n betas = betas if betas is not None else self.betas\n\n left_hand_pose = (left_hand_pose if left_hand_pose is not None else\n self.left_hand_pose)\n right_hand_pose = (right_hand_pose if right_hand_pose is not None else\n self.right_hand_pose)\n jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose\n leye_pose = leye_pose if leye_pose is not None else self.leye_pose\n reye_pose = reye_pose if reye_pose is not None else self.reye_pose\n expression = expression if expression is not None else self.expression\n\n apply_trans = transl is not None or hasattr(self, 'transl')\n if transl is None:\n if hasattr(self, 'transl'):\n transl = self.transl\n\n if self.use_pca:\n left_hand_pose = torch.einsum(\n 'bi,ij->bj', [left_hand_pose, self.left_hand_components])\n right_hand_pose = torch.einsum(\n 'bi,ij->bj', [right_hand_pose, self.right_hand_components])\n\n full_pose = torch.cat([global_orient.reshape(-1, 1, 3),\n body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3),\n jaw_pose.reshape(-1, 1, 3),\n leye_pose.reshape(-1, 1, 3),\n reye_pose.reshape(-1, 1, 3),\n left_hand_pose.reshape(-1, 15, 3),\n right_hand_pose.reshape(-1, 15, 3)],\n dim=1).reshape(-1, 165).to(self.pose_mean.device)\n\n # Add the mean pose of the model. Does not affect the body, only the\n # hands when flat_hand_mean == False\n full_pose += self.pose_mean\n\n batch_size = max(betas.shape[0], global_orient.shape[0],\n body_pose.shape[0])\n # Concatenate the shape and expression coefficients\n scale = int(batch_size / betas.shape[0])\n if scale > 1:\n betas = betas.expand(scale, -1)\n shape_components = torch.cat([betas, expression], dim=-1).to(self.pose_mean.device)\n\n shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)\n\n vertices, joints, A, T = lbs(shape_components, full_pose, self.v_template,\n shapedirs, self.posedirs,\n self.J_regressor, self.parents,\n self.lbs_weights, pose2rot=pose2rot,\n )\n\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(\n dim=0).expand(batch_size, -1).contiguous()\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(\n self.batch_size, 1, 1)\n if self.use_face_contour:\n lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(\n vertices, full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain,\n pose2rot=True,\n )\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords\n\n lmk_faces_idx = torch.cat([lmk_faces_idx,\n dyn_lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat(\n [lmk_bary_coords.expand(batch_size, -1, -1),\n dyn_lmk_bary_coords], 1)\n\n landmarks = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n\n # import matplotlib.pyplot as plt\n # import numpy as np\n # xs = joints[0,:,0]\n # ys = joints[0,:,1]\n # plt.scatter(xs, ys)\n\n # # zip joins x and y coordinates in pairs\n # count = 0\n # for x,y in zip(xs, ys):\n\n # label = \"{:.2f}\".format(count)\n\n # plt.annotate(label, # this is the text\n # (x,y), # these are the coordinates to position the label\n # textcoords=\"offset points\", # how to position the text\n # xytext=(0,10), # distance 
from text to points (x,y)\n # ha='center') # horizontal alignment can be left, right or center\n # count += 1\n # plt.savefig(\"joints.png\")\n # import pdb; pdb.set_trace()\n\n # Add any extra joints that might be needed\n joints = self.vertex_joint_selector(vertices, joints)\n # Add the landmarks to the joints\n joints = torch.cat([joints, landmarks], dim=1)\n # Map the joints to the current dataset\n\n if self.joint_mapper is not None:\n joints = self.joint_mapper(joints=joints, vertices=vertices)\n\n if apply_trans:\n joints += transl.unsqueeze(dim=1)\n vertices += transl.unsqueeze(dim=1)\n # clone because we are modifying them in-place\n A = A.clone()\n A[..., :3, 3] += transl.unsqueeze(dim=1)\n T = T.clone()\n T[..., :3, 3] += transl.unsqueeze(dim=1)\n\n v_shaped = None\n if return_shaped:\n v_shaped = self.v_template + blend_shapes(betas, self.shapedirs)\n else:\n v_shaped = Tensor(0)\n\n output = TensorOutput(vertices=vertices if return_verts else None,\n joints=joints,\n betas=betas,\n expression=expression,\n global_orient=global_orient,\n body_pose=body_pose,\n left_hand_pose=left_hand_pose,\n right_hand_pose=right_hand_pose,\n jaw_pose=jaw_pose,\n v_shaped=v_shaped,\n full_pose=full_pose if return_full_pose else None,\n A=A,\n T=T,\n f=self.faces)\n return output" }, { "identifier": "SMCReader", "path": "data/dna_rendering/dna_rendering_sample_code/SMCReader.py", "snippet": "class SMCReader:\n\n def __init__(self, file_path):\n \"\"\"Read SenseMocapFile endswith \".smc\".\n\n Args:\n file_path (str):\n Path to an SMC file.\n body_model (nn.Module or dict):\n Only needed for SMPL transformation to device frame\n if nn.Module: a body_model instance\n if dict: a body_model config\n \"\"\"\n self.smc = h5py.File(file_path, 'r')\n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None \n self.__available_keys__ = list(self.smc.keys())\n \n self.actor_info = None \n if hasattr(self.smc, 'attrs') and len(self.smc.attrs.keys()) > 0:\n self.actor_info = dict(\n id=self.smc.attrs['actor_id'],\n perf_id=self.smc.attrs['performance_id'],\n age=self.smc.attrs['age'],\n gender=self.smc.attrs['gender'],\n height=self.smc.attrs['height'],\n weight=self.smc.attrs['weight'],\n ethnicity=self.smc.attrs['ethnicity'],\n )\n\n self.Camera_5mp_info = None \n if 'Camera_5mp' in self.smc:\n self.Camera_5mp_info = dict(\n num_device=self.smc['Camera_5mp'].attrs['num_device'],\n num_frame=self.smc['Camera_5mp'].attrs['num_frame'],\n resolution=self.smc['Camera_5mp'].attrs['resolution'],\n )\n self.Camera_12mp_info = None \n if 'Camera_12mp' in self.smc:\n self.Camera_12mp_info = dict(\n num_device=self.smc['Camera_12mp'].attrs['num_device'],\n num_frame=self.smc['Camera_12mp'].attrs['num_frame'],\n resolution=self.smc['Camera_12mp'].attrs['resolution'],\n )\n self.Kinect_info = None\n if 'Kinect' in self.smc:\n self.Kinect_info=dict(\n num_device=self.smc['Kinect'].attrs['num_device'],\n num_frame=self.smc['Kinect'].attrs['num_frame'],\n resolution=self.smc['Kinect'].attrs['resolution'],\n )\n\n def get_available_keys(self):\n return self.__available_keys__ \n\n def get_actor_info(self):\n return self.actor_info\n \n def get_Camera_12mp_info(self):\n return self.Camera_12mp_info\n\n def get_Camera_5mp_info(self):\n return self.Camera_5mp_info\n \n def get_Kinect_info(self):\n return self.Kinect_info\n \n ### RGB Camera Calibration\n def get_Calibration_all(self):\n \"\"\"Get calibration matrix of all cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration 
matrixs of all matrixs.\n dict( \n Camera_Parameter: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_id(str) in {'Camera_5mp': '0'~'47', 'Camera_12mp':'48'~'60'}\n Matrix_type in ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\" \n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n if self.__calibration_dict__ is not None:\n return self.__calibration_dict__\n\n self.__calibration_dict__ = dict()\n for ci in self.smc['Camera_Parameter'].keys():\n self.__calibration_dict__.setdefault(ci,dict())\n for mt in ['D', 'K', 'RT', 'Color_Calibration'] :\n self.__calibration_dict__[ci][mt] = \\\n self.smc['Camera_Parameter'][ci][mt][()]\n return self.__calibration_dict__\n\n def get_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain camera by its type and id \n\n Args:\n Camera_id (int/str of a number):\n Camera_id(str) in {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\"\n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n rs = dict()\n for k in ['D', 'K', 'RT', 'Color_Calibration'] :\n rs[k] = self.smc['Camera_Parameter'][f'{int(Camera_id):02d}'][k][()]\n return rs\n\n ### Kinect Camera Calibration\n def get_Kinect_Calibration_all(self):\n \"\"\"Get calibration matrix of all kinect cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration matrixs of all matrixs.\n dict( \n Camera_group: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_group(str) in ['Kinect']\n Camera_id(str) in {'Kinect': '0'~'7'}\n Matrix_type in ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n if self.__kinect_calib_dict__ is not None:\n return self.__kinect_calib_dict__\n\n self.__kinect_calib_dict__ = dict()\n for cg in ['Kinect']:\n self.__kinect_calib_dict__.setdefault(cg,dict())\n for ci in self.smc['Calibration'][cg].keys():\n self.__kinect_calib_dict__[cg].setdefault(ci,dict())\n for mt in ['D', 'K', 'RT'] :\n self.__kinect_calib_dict__[cg][ci][mt] = \\\n self.smc['Calibration'][cg][ci][mt][()]\n return self.__kinect_calib_dict__\n\n def get_kinect_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain kinect camera by its type and id \n\n Args:\n Camera_group (str):\n Camera_group in ['Kinect'].\n Camera_id (int/str of a number):\n CameraID(str) in {'Kinect': '0'~'7'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(Camera_id in self.smc['Calibration'][\"Kinect\"].keys())\n rs = dict()\n for k in ['D', 'K', 'RT']:\n rs[k] = self.smc['Calibration'][\"Kinect\"][Camera_id][k][()]\n return rs\n\n ### RGB image\n def __read_color_from_bytes__(self, color_array):\n \"\"\"Decode an RGB image from an encoded byte array.\"\"\"\n return cv2.imdecode(color_array, cv2.IMREAD_COLOR)\n\n def get_mask(self, Camera_id, Frame_id=None, disable_tqdm=True):\n \"\"\"Get mask from Camera_id, Frame_id\n\n Args:\n Camera_id (int/str of a number):\n Camera_id (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'\n b.list of 
numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Mask' in self.smc:\n print(\"=== no key: Mask.\\nplease check available keys!\")\n return None \n\n Camera_id = str(Camera_id)\n\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc['Mask'][Camera_id]['mask'].keys())\n img_byte = self.smc['Mask'][Camera_id]['mask'][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc['Mask'][Camera_id]['mask'].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_mask(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n def get_img(self, Camera_group, Camera_id, Image_type, Frame_id=None, disable_tqdm=True):\n \"\"\"Get image its Camera_group, Camera_id, Image_type and Frame_id\n\n Args:\n Camera_group (str):\n Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'].\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Image_type(str) in \n {'Camera_5mp': ['color'], \n 'Camera_12mp': ['color'],\n 'Kinect': ['depth', 'mask']}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not Camera_group in self.smc:\n print(\"=== no key: %s.\\nplease check available keys!\" % Camera_group)\n return None\n\n assert(Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'])\n Camera_id = str(Camera_id)\n assert(Camera_id in self.smc[Camera_group].keys())\n assert(Image_type in self.smc[Camera_group][Camera_id].keys())\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc[Camera_group][Camera_id][Image_type].keys())\n if Image_type in ['color']:\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n if Image_type == 'mask':\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n if Image_type == 'depth':\n img_color = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc[Camera_group][Camera_id][Image_type].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_img(Camera_group, Camera_id, Image_type,fi))\n return np.stack(rs,axis=0)\n \n ###Keypoints2d\n def get_Keypoints2d(self, Camera_id, Frame_id=None):\n \"\"\"Get keypoint2D by its Camera_group, Camera_id and Frame_id\n\n Args:\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n 
a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Keypoints_2D' in self.smc:\n print(\"=== no key: Keypoints_2D.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_2D'][Camera_id][()][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_2D'][Camera_id][()]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints2d(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n ###Keypoints3d\n def get_Keypoints3d(self, Frame_id=None):\n \"\"\"Get keypoint3D Frame_id, TODO coordinate\n\n Args:\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n Keypoints3d tensor: np.ndarray of shape ([N], ,3)\n \"\"\" \n if not 'Keypoints_3D' in self.smc:\n print(\"=== no key: Keypoints_3D.\\nplease check available keys!\")\n return None \n\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_3D'][\"keypoints3d\"][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_3D'][\"keypoints3d\"]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints3d(fi))\n return np.stack(rs,axis=0)\n\n ###SMPLx\n def get_SMPLx(self, Frame_id=None):\n \"\"\"Get SMPL (world coordinate) computed by mocap processing pipeline.\n\n Args:\n Frame_id (int, list or None, optional):\n int: frame id of one selected frame\n list: a list of frame id\n None: all frames will be returned\n Defaults to None.\n\n Returns:\n dict:\n 'global_orient': np.ndarray of shape (N, 3)\n 'body_pose': np.ndarray of shape (N, 21, 3)\n 'transl': np.ndarray of shape (N, 3)\n 'betas': np.ndarray of shape (1, 10)\n \"\"\"\n if not 'SMPLx' in self.smc:\n print(\"=== no key: SMPLx.\\nplease check available keys!\")\n return None \n\n t_frame = self.smc['SMPLx']['betas'][()].shape[0]\n if Frame_id is None:\n frame_list = range(t_frame)\n elif isinstance(Frame_id, list):\n frame_list = [int(fi) for fi in Frame_id]\n elif isinstance(Frame_id, (int,str)):\n Frame_id = int(Frame_id)\n assert Frame_id < t_frame,\\\n f'Invalid frame_index {Frame_id}'\n frame_list = Frame_id\n else:\n raise TypeError('frame_id should be int, list or None.')\n\n smpl_dict = {}\n for key in ['betas', 'expression', 'fullpose', 'transl']:\n smpl_dict[key] = self.smc['SMPLx'][key][()][frame_list, ...]\n smpl_dict['scale'] = self.smc['SMPLx']['scale'][()]\n\n return smpl_dict\n\n def release(self):\n self.smc = None \n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None\n self.__available_keys__ = None\n self.actor_info = None \n self.Camera_5mp_info = None\n self.Camera_12mp_info = None \n self.Kinect_info = None" } ]
import os
import sys
import numpy as np
import torch
import json
import imageio
import cv2
import random
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
    read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from smpl.smpl_numpy import SMPL
from smplx.body_models import SMPLX
from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
15,383
ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, 
cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try:
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" 
image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try:
xyz, rgb, _ = read_points3D_binary(bin_path)
5
2023-11-29 07:10:39+00:00
24k
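The record above centers on COLMAP camera loading (qvec2rotmat, getWorld2View2, getNerfppNorm, readColmapCameras). Below is a minimal NumPy-only sketch of that pose math, using only the formulas already present in the snippets; the sample qvec/tvec values and any variable names outside those two functions are illustrative assumptions, not part of the dataset record.

# Minimal sketch of the camera-pose math used by readColmapCameras/getNerfppNorm:
# COLMAP stores a world-to-camera rotation as a quaternion (qvec) plus a
# translation (tvec); the loader transposes the rotation, builds the 4x4
# world-to-view matrix, and recovers the camera center from its inverse.
import numpy as np

def qvec2rotmat(qvec):
    # Same closed form as in scene/colmap_loader.py (w, x, y, z ordering).
    w, x, y, z = qvec
    return np.array([
        [1 - 2 * y**2 - 2 * z**2, 2 * x * y - 2 * w * z,   2 * z * x + 2 * w * y],
        [2 * x * y + 2 * w * z,   1 - 2 * x**2 - 2 * z**2, 2 * y * z - 2 * w * x],
        [2 * z * x - 2 * w * y,   2 * y * z + 2 * w * x,   1 - 2 * x**2 - 2 * y**2]])

def getWorld2View2(R, t, translate=np.zeros(3), scale=1.0):
    # Same construction as in utils/graphics_utils.py: R is stored transposed.
    Rt = np.zeros((4, 4))
    Rt[:3, :3] = R.transpose()
    Rt[:3, 3] = t
    Rt[3, 3] = 1.0
    C2W = np.linalg.inv(Rt)
    C2W[:3, 3] = (C2W[:3, 3] + translate) * scale
    return np.float32(np.linalg.inv(C2W))

# Hypothetical extrinsics for one image (unit quaternion = 30 deg about +y).
qvec = np.array([0.9659258, 0.0, 0.2588190, 0.0])
tvec = np.array([0.1, -0.2, 2.5])

R = np.transpose(qvec2rotmat(qvec))   # as in readColmapCameras
T = np.array(tvec)
W2C = getWorld2View2(R, T)
C2W = np.linalg.inv(W2C)
cam_center = C2W[:3, 3]               # the quantity getNerfppNorm averages over
print(cam_center)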
cswry/SeeSR
test_seesr.py
[ { "identifier": "StableDiffusionControlNetPipeline", "path": "pipelines/pipeline_seesr.py", "snippet": "class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoaderMixin):\n r\"\"\"\n Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n In addition the pipeline inherits the following loading methods:\n - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):\n Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets\n as a list, the outputs from each ControlNet are added together to create one combined additional\n conditioning.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPImageProcessor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n _optional_components = [\"safety_checker\", \"feature_extractor\"]\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n super().__init__()\n\n if safety_checker is None and requires_safety_checker:\n logger.warning(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. 
For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n\n if safety_checker is not None and feature_extractor is None:\n raise ValueError(\n \"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety\"\n \" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.\"\n )\n\n if isinstance(controlnet, (list, tuple)):\n controlnet = MultiControlNetModel(controlnet)\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n controlnet=controlnet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n self.register_to_config(requires_safety_checker=requires_safety_checker)\n\n def _init_tiled_vae(self,\n encoder_tile_size = 256,\n decoder_tile_size = 256,\n fast_decoder = False,\n fast_encoder = False,\n color_fix = False,\n vae_to_gpu = True):\n # save original forward (only once)\n if not hasattr(self.vae.encoder, 'original_forward'):\n setattr(self.vae.encoder, 'original_forward', self.vae.encoder.forward)\n if not hasattr(self.vae.decoder, 'original_forward'):\n setattr(self.vae.decoder, 'original_forward', self.vae.decoder.forward)\n\n encoder = self.vae.encoder\n decoder = self.vae.decoder\n\n self.vae.encoder.forward = VAEHook(\n encoder, encoder_tile_size, is_decoder=False, fast_decoder=fast_decoder, fast_encoder=fast_encoder, color_fix=color_fix, to_gpu=vae_to_gpu)\n self.vae.decoder.forward = VAEHook(\n decoder, decoder_tile_size, is_decoder=True, fast_decoder=fast_decoder, fast_encoder=fast_encoder, color_fix=color_fix, to_gpu=vae_to_gpu)\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing\n def enable_vae_slicing(self):\n r\"\"\"\n Enable sliced VAE decoding.\n\n When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several\n steps. This is useful to save some memory and allow larger batch sizes.\n \"\"\"\n self.vae.enable_slicing()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing\n def disable_vae_slicing(self):\n r\"\"\"\n Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_slicing()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling\n def enable_vae_tiling(self):\n r\"\"\"\n Enable tiled VAE decoding.\n\n When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in\n several steps. This is useful to save a large amount of memory and to allow the processing of larger images.\n \"\"\"\n self.vae.enable_tiling()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling\n def disable_vae_tiling(self):\n r\"\"\"\n Disable tiled VAE decoding. 
If `enable_vae_tiling` was previously invoked, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_tiling()\n\n def enable_sequential_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,\n text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n Note that offloading happens on a submodule basis. Memory savings are higher than with\n `enable_model_cpu_offload`, but performance is lower.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:\n cpu_offload(cpu_offloaded_model, device)\n\n if self.safety_checker is not None:\n cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)\n\n def enable_model_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared\n to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`\n method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with\n `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.\n \"\"\"\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:\n _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n\n if self.safety_checker is not None:\n # the safety checker can offload the vae again\n _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)\n\n # control net hook has be manually offloaded as it alternates with unet\n cpu_offload_with_hook(self.controlnet, device)\n\n # We'll offload the last model manually.\n self.final_offload_hook = hook\n\n @property\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device\n def _execution_device(self):\n r\"\"\"\n Returns the device on which the pipeline's models will be executed. 
After calling\n `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module\n hooks.\n \"\"\"\n if not hasattr(self.unet, \"_hf_hook\"):\n return self.device\n for module in self.unet.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n ram_encoder_hidden_states: Optional[torch.FloatTensor] = None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n do_classifier_free_guidance (`bool`):\n whether to use classifier free guidance or not\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n \"\"\"\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n # textual inversion: procecss multi-vector tokens if necessary\n if isinstance(self, TextualInversionLoaderMixin):\n prompt = self.maybe_convert_prompt(prompt, self.tokenizer)\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance and negative_prompt_embeds is None:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif prompt is not None and type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n # textual inversion: procecss multi-vector tokens if necessary\n if isinstance(self, TextualInversionLoaderMixin):\n uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)\n\n max_length = prompt_embeds.shape[1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = uncond_input.attention_mask.to(device)\n else:\n attention_mask = None\n\n negative_prompt_embeds = self.text_encoder(\n uncond_input.input_ids.to(device),\n attention_mask=attention_mask,\n )\n negative_prompt_embeds = negative_prompt_embeds[0]\n\n if do_classifier_free_guidance:\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = negative_prompt_embeds.shape[1]\n\n negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)\n negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n ram_encoder_hidden_states = torch.cat([ram_encoder_hidden_states, ram_encoder_hidden_states])\n\n return prompt_embeds, ram_encoder_hidden_states\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker\n def run_safety_checker(self, image, device, dtype):\n if self.safety_checker is None:\n has_nsfw_concept = None\n else:\n if torch.is_tensor(image):\n feature_extractor_input = self.image_processor.postprocess(image, output_type=\"pil\")\n else:\n feature_extractor_input = self.image_processor.numpy_to_pil(image)\n safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors=\"pt\").to(device)\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(dtype)\n )\n return image, has_nsfw_concept\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents\n def decode_latents(self, latents):\n warnings.warn(\n \"The decode_latents method is deprecated and will be removed in a future version. 
Please\"\n \" use VaeImageProcessor instead\",\n FutureWarning,\n )\n latents = 1 / self.vae.config.scaling_factor * latents\n image = self.vae.decode(latents, return_dict=False)[0]\n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n return image\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n #extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n def check_inputs(\n self,\n prompt,\n image,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n controlnet_conditioning_scale=1.0,\n ):\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n if prompt is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt is None and prompt_embeds is None:\n raise ValueError(\n \"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.\"\n )\n elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if negative_prompt is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. Please make sure to only forward one of the two.\"\n )\n\n if prompt_embeds is not None and negative_prompt_embeds is not None:\n if prompt_embeds.shape != negative_prompt_embeds.shape:\n raise ValueError(\n \"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but\"\n f\" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`\"\n f\" {negative_prompt_embeds.shape}.\"\n )\n\n # `prompt` needs more sophisticated handling when there are multiple\n # conditionings.\n if isinstance(self.controlnet, MultiControlNetModel):\n if isinstance(prompt, list):\n logger.warning(\n f\"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}\"\n \" prompts. 
The conditionings will be fixed across the prompts.\"\n )\n\n # Check `image`\n is_compiled = hasattr(F, \"scaled_dot_product_attention\") and isinstance(\n self.controlnet, torch._dynamo.eval_frame.OptimizedModule\n )\n if (\n isinstance(self.controlnet, ControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, ControlNetModel)\n ):\n self.check_image(image, prompt, prompt_embeds)\n elif (\n isinstance(self.controlnet, MultiControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, MultiControlNetModel)\n ):\n if not isinstance(image, list):\n raise TypeError(\"For multiple controlnets: `image` must be type `list`\")\n\n # When `image` is a nested list:\n # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])\n elif any(isinstance(i, list) for i in image):\n raise ValueError(\"A single batch of multiple conditionings are supported at the moment.\")\n elif len(image) != len(self.controlnet.nets):\n raise ValueError(\n \"For multiple controlnets: `image` must have the same length as the number of controlnets.\"\n )\n\n for image_ in image:\n self.check_image(image_, prompt, prompt_embeds)\n else:\n assert False\n\n # Check `controlnet_conditioning_scale`\n if (\n isinstance(self.controlnet, ControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, ControlNetModel)\n ):\n if not isinstance(controlnet_conditioning_scale, float):\n raise TypeError(\"For single controlnet: `controlnet_conditioning_scale` must be type `float`.\")\n elif (\n isinstance(self.controlnet, MultiControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, MultiControlNetModel)\n ):\n if isinstance(controlnet_conditioning_scale, list):\n if any(isinstance(i, list) for i in controlnet_conditioning_scale):\n raise ValueError(\"A single batch of multiple conditionings are supported at the moment.\")\n elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(\n self.controlnet.nets\n ):\n raise ValueError(\n \"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have\"\n \" the same length as the number of controlnets\"\n )\n else:\n assert False\n\n def check_image(self, image, prompt, prompt_embeds):\n image_is_pil = isinstance(image, PIL.Image.Image)\n image_is_tensor = isinstance(image, torch.Tensor)\n image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)\n image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)\n\n if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:\n raise TypeError(\n \"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors\"\n )\n\n if image_is_pil:\n image_batch_size = 1\n elif image_is_tensor:\n image_batch_size = image.shape[0]\n elif image_is_pil_list:\n image_batch_size = len(image)\n elif image_is_tensor_list:\n image_batch_size = len(image)\n\n if prompt is not None and isinstance(prompt, str):\n prompt_batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n prompt_batch_size = len(prompt)\n elif prompt_embeds is not None:\n prompt_batch_size = prompt_embeds.shape[0]\n\n if image_batch_size != 1 and image_batch_size != prompt_batch_size:\n raise ValueError(\n f\"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}\"\n )\n\n def prepare_image(\n self,\n image,\n width,\n height,\n batch_size,\n num_images_per_prompt,\n device,\n dtype,\n do_classifier_free_guidance=False,\n guess_mode=False,\n ):\n if not isinstance(image, torch.Tensor):\n if isinstance(image, PIL.Image.Image):\n image = [image]\n\n if isinstance(image[0], PIL.Image.Image):\n images = []\n\n for image_ in image:\n image_ = image_.convert(\"RGB\")\n #image_ = image_.resize((width, height), resample=PIL_INTERPOLATION[\"lanczos\"])\n image_ = np.array(image_)\n image_ = image_[None, :]\n images.append(image_)\n\n image = images\n\n image = np.concatenate(image, axis=0)\n image = np.array(image).astype(np.float32) / 255.0\n image = image.transpose(0, 3, 1, 2)\n image = torch.from_numpy(image)#.flip(1)\n elif isinstance(image[0], torch.Tensor):\n image = torch.cat(image, dim=0)\n\n image_batch_size = image.shape[0]\n\n if image_batch_size == 1:\n repeat_by = batch_size\n else:\n # image batch size is the same as prompt batch size\n repeat_by = num_images_per_prompt\n\n image = image.repeat_interleave(repeat_by, dim=0)\n\n image = image.to(device=device, dtype=dtype)\n\n if do_classifier_free_guidance and not guess_mode:\n image = torch.cat([image] * 2)\n\n return image\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents\n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n #latents = randn_tensor(shape, generator=None, device=device, dtype=dtype)\n #offset_noise = torch.randn(batch_size, num_channels_latents, 1, 1, device=device)\n #latents = latents + 0.1 * offset_noise\n else:\n latents = latents.to(device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def _default_height_width(self, height, width, image):\n # NOTE: It is possible that a list of images have different\n # dimensions for each image, so just checking the first image\n # is not _exactly_ correct, but it is simple.\n while isinstance(image, list):\n image = image[0]\n\n if height is None:\n if isinstance(image, PIL.Image.Image):\n height = image.height\n elif isinstance(image, torch.Tensor):\n height = image.shape[2]\n\n height = (height // 8) * 8 # round down to nearest multiple of 8\n\n if width is None:\n if isinstance(image, PIL.Image.Image):\n width = image.width\n elif isinstance(image, torch.Tensor):\n width = image.shape[3]\n\n width = (width // 8) * 8 # round down to nearest multiple of 8\n\n return height, width\n\n # override DiffusionPipeline\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n safe_serialization: bool = False,\n variant: Optional[str] = None,\n ):\n if isinstance(self.controlnet, ControlNetModel):\n super().save_pretrained(save_directory, safe_serialization, variant)\n else:\n raise NotImplementedError(\"Currently, the `save_pretrained()` is not implemented for Multi-ControlNet.\")\n \n def _gaussian_weights(self, tile_width, tile_height, nbatches):\n \"\"\"Generates a gaussian mask of weights for tile contributions\"\"\"\n from numpy import pi, exp, sqrt\n import numpy as np\n\n latent_width = tile_width\n latent_height = tile_height\n\n var = 0.01\n midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1\n x_probs = [exp(-(x-midpoint)*(x-midpoint)/(latent_width*latent_width)/(2*var)) / sqrt(2*pi*var) for x in range(latent_width)]\n midpoint = latent_height / 2\n y_probs = [exp(-(y-midpoint)*(y-midpoint)/(latent_height*latent_height)/(2*var)) / sqrt(2*pi*var) for y in range(latent_height)]\n\n weights = np.outer(y_probs, x_probs)\n return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))\n\n @perfcount\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n conditioning_scale: Union[float, 
List[float]] = 1.0,\n guess_mode: bool = False,\n image_sr = None,\n start_steps = 999,\n start_point = 'noise',\n ram_encoder_hidden_states=None,\n latent_tiled_size=320,\n latent_tiled_overlap=4,\n args=None\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`,\n `List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`):\n The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If\n the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can\n also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If\n height and/or width are passed, `image` is resized according to them. If multiple ControlNets are\n specified in init, images must be passed as a list such that each element of the list can be correctly\n batched for input to a single controlnet.\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):\n The outputs of the controlnet are multiplied by `conditioning_scale` before they are added\n to the residual in the original unet. If multiple ControlNets are specified in init, you can set the\n corresponding scale as a list.\n guess_mode (`bool`, *optional*, defaults to `False`):\n In this mode, the ControlNet encoder will try best to recognize the content of the input image even if\n you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.\n\n Examples:\n\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n # 0. Default height and width to unet\n height, width = self._default_height_width(height, width, image)\n \n # 1. Check inputs. Raise error if not correct\n \"\"\"\n self.check_inputs(\n prompt,\n image,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n conditioning_scale,\n )\n \"\"\"\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\n \"\"\"\n if isinstance(controlnet, MultiControlNetModel) and isinstance(conditioning_scale, float):\n conditioning_scale = [conditioning_scale] * len(controlnet.nets)\n \n global_pool_conditions = (\n controlnet.config.global_pool_conditions\n if isinstance(controlnet, ControlNetModel)\n else controlnet.nets[0].config.global_pool_conditions\n )\n \n guess_mode = guess_mode or global_pool_conditions\n \"\"\"\n\n # 3. Encode input prompt\n prompt_embeds, ram_encoder_hidden_states = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n ram_encoder_hidden_states=ram_encoder_hidden_states\n )\n\n # 4. Prepare image\n image = self.prepare_image(\n image=image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n\n # 5. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 6. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n )\n\n # 6. Prepare the start point\n if start_point == 'noise':\n latents = latents\n elif start_point == 'lr': # LRE Strategy\n latents_condition_image = self.vae.encode(image*2-1).latent_dist.sample()\n latents_condition_image = latents_condition_image * self.vae.config.scaling_factor\n start_steps_tensor = torch.randint(start_steps, start_steps+1, (latents.shape[0],), device=latents.device)\n start_steps_tensor = start_steps_tensor.long()\n latents = self.scheduler.add_noise(latents_condition_image[0:1, ...], latents, start_steps_tensor)\n \n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n \n _, _, h, w = latents.size()\n tile_size, tile_overlap = (latent_tiled_size, latent_tiled_overlap) if args is not None else (256, 8)\n if h*w<=tile_size*tile_size:\n print(f\"[Tiled Latent]: the input size is tiny and unnecessary to tile.\")\n else:\n print(f\"[Tiled Latent]: the input size is {image.shape[-2]}x{image.shape[-1]}, need to tiled\")\n\n for i, t in enumerate(timesteps):\n # pass, if the timestep is larger than start_steps\n if t > start_steps:\n print(f'pass {t} steps.')\n continue\n\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # controlnet(s) inference\n if guess_mode and do_classifier_free_guidance:\n # Infer ControlNet only for the conditional batch.\n controlnet_latent_model_input = latents\n controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]\n \n else:\n controlnet_latent_model_input = latent_model_input\n controlnet_prompt_embeds = prompt_embeds\n\n if h*w<=tile_size*tile_size: # tiled latent input\n down_block_res_samples, mid_block_res_sample = [None]*10, None\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n controlnet_latent_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=image,\n conditioning_scale=conditioning_scale,\n guess_mode=guess_mode,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )\n\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )[0]\n else:\n tile_weights = self._gaussian_weights(tile_size, tile_size, 1)\n tile_size = min(tile_size, min(h, w))\n tile_weights = self._gaussian_weights(tile_size, tile_size, 1)\n\n grid_rows = 0\n cur_x = 0\n while cur_x < latent_model_input.size(-1):\n cur_x = max(grid_rows * tile_size-tile_overlap * grid_rows, 0)+tile_size\n grid_rows += 1\n\n grid_cols = 0\n cur_y = 0\n while cur_y < latent_model_input.size(-2):\n cur_y = max(grid_cols * tile_size-tile_overlap * grid_cols, 0)+tile_size\n grid_cols += 1\n\n input_list = []\n cond_list = []\n img_list = []\n noise_preds = []\n for row in range(grid_rows):\n noise_preds_row = []\n for col in range(grid_cols):\n if col < grid_cols-1 or row < grid_rows-1:\n # extract tile from input image\n ofs_x = max(row * tile_size-tile_overlap * row, 0)\n ofs_y = max(col * tile_size-tile_overlap * col, 0)\n # input tile area on total image\n if row == grid_rows-1:\n ofs_x = w - tile_size\n if col == grid_cols-1:\n ofs_y = h - tile_size\n\n input_start_x = ofs_x\n input_end_x = ofs_x + 
tile_size\n input_start_y = ofs_y\n input_end_y = ofs_y + tile_size\n\n # input tile dimensions\n input_tile = latent_model_input[:, :, input_start_y:input_end_y, input_start_x:input_end_x]\n input_list.append(input_tile)\n cond_tile = controlnet_latent_model_input[:, :, input_start_y:input_end_y, input_start_x:input_end_x]\n cond_list.append(cond_tile)\n img_tile = image[:, :, input_start_y*8:input_end_y*8, input_start_x*8:input_end_x*8]\n img_list.append(img_tile)\n\n if len(input_list) == batch_size or col == grid_cols-1:\n input_list_t = torch.cat(input_list, dim=0)\n cond_list_t = torch.cat(cond_list, dim=0)\n img_list_t = torch.cat(img_list, dim=0)\n #print(input_list_t.shape, cond_list_t.shape, img_list_t.shape, fg_mask_list_t.shape)\n\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n cond_list_t,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=img_list_t,\n conditioning_scale=conditioning_scale,\n guess_mode=guess_mode,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n # predict the noise residual\n model_out = self.unet(\n input_list_t,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )[0]\n\n #for sample_i in range(model_out.size(0)):\n # noise_preds_row.append(model_out[sample_i].unsqueeze(0))\n input_list = []\n cond_list = []\n img_list = []\n\n noise_preds.append(model_out)\n\n # Stitch noise predictions for all tiles\n noise_pred = torch.zeros(latent_model_input.shape, device=latent_model_input.device)\n contributors = torch.zeros(latent_model_input.shape, device=latent_model_input.device)\n # Add each tile contribution to overall latents\n for row in range(grid_rows):\n for col in range(grid_cols):\n if col < grid_cols-1 or row < grid_rows-1:\n # extract tile from input image\n ofs_x = max(row * tile_size-tile_overlap * row, 0)\n ofs_y = max(col * tile_size-tile_overlap * col, 0)\n # input tile area on total image\n if row == grid_rows-1:\n ofs_x = w - tile_size\n if col == grid_cols-1:\n ofs_y = h - tile_size\n\n input_start_x = ofs_x\n input_end_x = ofs_x + tile_size\n input_start_y = ofs_y\n input_end_y = ofs_y + tile_size\n \n noise_pred[:, :, input_start_y:input_end_y, input_start_x:input_end_x] += noise_preds[row*grid_cols + col] * tile_weights\n contributors[:, :, input_start_y:input_end_y, input_start_x:input_end_x] += tile_weights\n # Average overlapping areas with more than 1 contributor\n noise_pred /= contributors\n \n \n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n # call the callback, if 
provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n has_nsfw_concept = None\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]#.flip(1)\n #image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "load_dreambooth_lora", "path": "utils/misc.py", "snippet": "def load_dreambooth_lora(unet, vae=None, model_path=None, alpha=1.0, model_base=\"\"):\n if model_path is None: return unet\n \n if model_path.endswith(\".ckpt\"):\n base_state_dict = torch.load(model_path)['state_dict']\n elif model_path.endswith(\".safetensors\"):\n state_dict = {}\n with safe_open(model_path, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n state_dict[key] = f.get_tensor(key)\n \n is_lora = all(\"lora\" in k for k in state_dict.keys())\n if not is_lora:\n base_state_dict = state_dict\n else:\n base_state_dict = {}\n with safe_open(model_base, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n base_state_dict[key] = f.get_tensor(key)\n \n converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_state_dict, unet.config)\n unet_state_dict = unet.state_dict()\n for key in converted_unet_checkpoint:\n converted_unet_checkpoint[key] = alpha * converted_unet_checkpoint[key] + (1.0-alpha) * unet_state_dict[key]\n unet.load_state_dict(converted_unet_checkpoint, strict=False)\n\n if vae is not None:\n converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_state_dict, vae.config)\n vae.load_state_dict(converted_vae_checkpoint)\n \n return unet, vae" }, { "identifier": "wavelet_color_fix", "path": "utils/wavelet_color_fix.py", "snippet": "def wavelet_color_fix(target: Image, source: Image):\n # Convert images to tensors\n to_tensor = ToTensor()\n target_tensor = to_tensor(target).unsqueeze(0)\n source_tensor = to_tensor(source).unsqueeze(0)\n\n # Apply wavelet reconstruction\n result_tensor = wavelet_reconstruction(target_tensor, source_tensor)\n\n # Convert tensor back to image\n to_image = ToPILImage()\n result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0))\n\n return result_image" }, { "identifier": "adain_color_fix", "path": "utils/wavelet_color_fix.py", "snippet": "def adain_color_fix(target: Image, source: Image):\n # Convert images to tensors\n to_tensor = ToTensor()\n target_tensor = to_tensor(target).unsqueeze(0)\n source_tensor = to_tensor(source).unsqueeze(0)\n\n # Apply adaptive instance normalization\n 
result_tensor = adaptive_instance_normalization(target_tensor, source_tensor)\n\n # Convert tensor back to image\n to_image = ToPILImage()\n result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0))\n\n return result_image" }, { "identifier": "ram", "path": "ram/models/ram_lora.py", "snippet": "def ram(pretrained='', pretrained_condition='', **kwargs):\n model = RAMLora(**kwargs)\n\n if pretrained:\n if kwargs['vit'] == 'swin_b':\n model, msg = load_checkpoint_swinbase(model, pretrained, kwargs)\n elif kwargs['vit'] == 'swin_l':\n model, msg = load_checkpoint_swinlarge(model, pretrained, kwargs)\n else:\n model, msg = load_checkpoint(model, pretrained)\n print('vit:', kwargs['vit'])\n \n if pretrained_condition:\n model.load_state_dict(torch.load(pretrained_condition), strict=False)\n print(f'load lora weights from {pretrained_condition}')\n\n return model" }, { "identifier": "inference_ram", "path": "ram/inference.py", "snippet": "def inference_ram(image, model):\n\n with torch.no_grad():\n tags, tags_chinese = model.generate_tag(image)\n\n return tags[0],tags_chinese[0]" }, { "identifier": "get_transform", "path": "ram/transform.py", "snippet": "def get_transform(image_size=384):\n return Compose([\n convert_to_rgb,\n Resize((image_size, image_size)),\n ToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])" } ]
import os
import sys
import cv2
import glob
import argparse
import numpy as np
import torch
import torch.utils.checkpoint
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, DDPMScheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor
from pipelines.pipeline_seesr import StableDiffusionControlNetPipeline
from utils.misc import load_dreambooth_lora
from utils.wavelet_color_fix import wavelet_color_fix, adain_color_fix
from ram.models.ram_lora import ram
from ram import inference_ram as inference
from ram import get_transform
from typing import Mapping, Any
from torchvision import transforms
from models.controlnet import ControlNetModel
from models.unet_2d_condition import UNet2DConditionModel
14,797
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline
validation_pipeline = StableDiffusionControlNetPipeline(
0
2023-11-27 08:50:33+00:00
24k
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n self.validate = validate\n self.gif = gif\n self.aspect = FLAGS.train_res[1] / FLAGS.train_res[0]\n self.fovy_range_min = np.deg2rad(FLAGS.fovy_range[0])\n self.fovy_range_max = np.deg2rad(FLAGS.fovy_range[1])\n self.elevation_range_min= np.deg2rad(FLAGS.elevation_range[0])\n self.elevation_range_max= np.deg2rad(FLAGS.elevation_range[1])\n self.angle_front = np.deg2rad(FLAGS.front_threshold)\n \n\n def _gif_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.display_res[1] / self.FLAGS.display_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 100) * np.pi * 2\n rotate_x = np.deg2rad(20)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(-rotate_x) @ util.rotate_y(ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n \n \n\n def _validate_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 4) * np.pi * 2\n rotate_x = np.random.uniform(-np.pi/4,np.pi/18)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(rotate_x) @ util.rotate_y( ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n\n def _train_scene(self, itr):\n fovy = np.random.uniform(self.fovy_range_min, self.fovy_range_max)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n if self.FLAGS.gpu_number == 8: # All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0,4]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1,5]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2,6]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3,7]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n elif self.FLAGS.gpu_number == 4: #All the results in the paper were generated using 8 3090 GPUs. 
We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n else:\n rotate_y = np.random.uniform(np.deg2rad(-180), np.deg2rad(180)) #All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n \n rotate_x = -np.random.uniform(self.elevation_range_min, self.elevation_range_max)\n # angle_front = np.deg2rad(45)\n prompt_index = get_view_direction(thetas= rotate_x, phis = rotate_y, front= self.angle_front)\n cam_radius = 3\n x = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n y = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n mv = util.translate(x, y, -cam_radius) @ (util.rotate_x(rotate_x) @ util.rotate_y(rotate_y))\n if ((itr+1)/self.FLAGS.batch) <=self.FLAGS.coarse_iter:\n rotate_y1 = np.random.uniform(0,np.pi*2) \n rotate_x1 = np.random.uniform(-np.pi,np.pi)\n normal_rotate = util.rotate_y_1(rotate_y1 )@ util.rotate_x_1(rotate_x1) \n else:\n normal_rotate = util.rotate_y_1(0)@util.rotate_x_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(rotate_y), torch.tensor([fovy])\n\n def __len__(self):\n if self.gif == True:\n return 100\n else:\n return 4 if self.validate else (self.FLAGS.iter + 1) * self.FLAGS.batch\n\n def __getitem__(self, itr):\n if self.gif:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._gif_scene(itr)\n elif self.validate:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._validate_scene(itr)\n else:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._train_scene(itr)\n\n return {\n 'mv' : mv,\n 'mvp' : mvp,\n 'campos' : campos,\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate': normal_rotate,\n 'prompt_index' : prompt_index,\n 'elev': elev,\n 'azim': azim,\n 'fov': fov\n }\n def collate(self, batch):\n iter_res, iter_spp = batch[0]['resolution'], batch[0]['spp']\n return {\n 'mv' : torch.cat(list([item['mv'] for item in batch]), dim=0),\n 'mvp' : torch.cat(list([item['mvp'] for item in batch]), dim=0),\n 'campos' : torch.cat(list([item['campos'] for item in batch]), dim=0),\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate' : torch.cat(list([item['normal_rotate'] for item in batch]), dim=0),\n # 'prompt_index' : torch.cat(list([item['prompt_index'] for item in batch]), dim=0),\n 'prompt_index' : np.array([item['prompt_index'] for item in batch], dtype=np.int32),\n 'elev' : np.array([item['elev'] for item in batch], dtype=np.float16),\n 'azim' : np.array([item['azim'] for item in batch], dtype=np.float16),\n 'fov' : torch.cat(list([item['fov'] for item in batch]), dim=0),\n }" }, { "identifier": "get_camera_params", "path": "dataset/dataset_mesh.py", "snippet": "def get_camera_params(resolution= 512, fov=45, 
elev_angle=-20, azim_angle=0):\n fovy = np.deg2rad(fov) \n elev = np.radians( elev_angle )\n azim = np.radians( azim_angle ) \n proj_mtx = util.perspective(fovy, resolution /resolution, 1, 50)\n mv = util.translate(0, 0, -3) @ (util.rotate_x(elev) @ util.rotate_y(azim))\n normal_rotate = util.rotate_y_1(-azim ) @ util.rotate_x_1(-elev) \n # nomral_rotate = util.rotate_y_1(0) @ util.rotate_x_1(0) \n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n bkgs = torch.ones(1, resolution, resolution, 3, dtype=torch.float32, device='cuda')\n return {\n 'mvp' : mvp[None, ...].cuda(),\n 'mv' : mv[None, ...].cuda(),\n 'campos' : campos[None, ...].cuda(),\n 'resolution' : [resolution, resolution], \n 'spp' : 1,\n 'background' : bkgs,\n 'normal_rotate' : normal_rotate[None,...].cuda(),\n 'elev_angle' : torch.tensor(elev_angle).cuda(),\n 'azim_angle' : torch.tensor(azim_angle).cuda(),\n 'fov' : torch.tensor(fovy).cuda(),\n }" }, { "identifier": "DMTetGeometry", "path": "geometry/dmtet_x_dreamer.py", "snippet": "class DMTetGeometry(torch.nn.Module):\n def __init__(self, grid_res, scale, FLAGS):\n super(DMTetGeometry, self).__init__()\n\n self.FLAGS = FLAGS\n self.grid_res = grid_res\n self.marching_tets = DMTet()\n \n tets = np.load('data/tets/{}_tets.npz'.format(self.grid_res))\n self.verts = torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * scale\n print(\"tet grid min/max\", torch.min(self.verts).item(), torch.max(self.verts).item())\n self.decoder = Decoder(multires=0 , AABB= self.getAABB(), mesh_scale= scale)\n self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda')\n self.generate_edges()\n self.pos_encoder = CameraEncoder().to(self.verts.device)\n\n def generate_edges(self):\n with torch.no_grad():\n edges = torch.tensor([0,1,0,2,0,3,1,2,1,3,2,3], dtype = torch.long, device = \"cuda\")\n all_edges = self.indices[:,edges].reshape(-1,2) \n all_edges_sorted = torch.sort(all_edges, dim=1)[0]\n self.all_edges = torch.unique(all_edges_sorted, dim=0)\n\n @torch.no_grad()\n def getAABB(self):\n return torch.min(self.verts, dim=0).values, torch.max(self.verts, dim=0).values\n\n def getMesh(self, material):\n pred= self.decoder(self.verts)\n \n self.sdf , self.deform = pred[:, 0], pred[:, 1:] \n v_deformed = self.verts + 1 / (self.grid_res ) * torch.tanh(self.deform)\n verts, faces = self.marching_tets(v_deformed, self.sdf, self.indices)\n \n imesh = mesh.Mesh(verts, faces, material=material)\n imesh = mesh.auto_normals(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None, if_normal=False, mode = 'geometry_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material) \n return render.render_mesh(glctx, \n opt_mesh, \n target['mvp'], \n target['campos'], \n lgt, \n target['resolution'], \n spp=target['spp'], \n msaa= True,\n background= target['background'],\n bsdf= bsdf,\n if_normal= if_normal,\n normal_rotate= target['normal_rotate'],\n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n \n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal= if_normal, mode = 
mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]]) # [B*2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z]) # [B * 2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n \n if iteration <=self.FLAGS.coarse_iter:\n t = torch.randint( guidance.min_step_early, guidance.max_step_early + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n pred_rgb_512 = buffers['shaded'][..., 0:4].permute(0, 3, 1, 2).contiguous() # [B, 4, 64, 64]\n latents = F.interpolate(pred_rgb_512, (64, 64), mode='bilinear', align_corners=False)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n \n else:\n t = torch.randint(guidance.min_step_late, guidance.max_step_late + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda')\n srgb = buffers['shaded'][...,0:3] #* buffers['shaded'][..., 3:4] # normal * mask\n # \n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [B, 3, 512, 512]\n latents = guidance.encode_imgs(pred_rgb_512)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n\n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings, index=indexs, came_posfeat=came_posfeat)\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred =noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond) # [B, 4, 64, 64]\n if iteration <= self.FLAGS.coarse_iter:\n w = (1 - guidance.alphas[t]) # [B]\n else:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w * (noise_pred - noise ) #*w1\n grad = torch.nan_to_num(grad)\n \n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n\n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), 
(32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask_sizes[i], mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "DLMesh", "path": "geometry/dlmesh_x_dreamer.py", "snippet": "class DLMesh(torch.nn.Module):\n def __init__(self, initial_guess, FLAGS):\n super(DLMesh, self).__init__()\n self.FLAGS = FLAGS\n self.initial_guess = initial_guess\n self.mesh = initial_guess.clone()\n self.pos_encoder = CameraEncoder().cuda()\n print(\"Base mesh has %d triangles and %d vertices.\" % (self.mesh.t_pos_idx.shape[0], self.mesh.v_pos.shape[0]))\n \n @torch.no_grad()\n def getAABB(self):\n return mesh.aabb(self.mesh)\n\n def getMesh(self, material):\n self.mesh.material = material\n\n imesh = mesh.Mesh(base=self.mesh)\n # Compute normals and tangent space\n imesh = mesh.auto_normals(imesh)\n imesh = mesh.compute_tangents(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None,if_normal=False, mode = 'appearance_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material)\n return render.render_mesh(glctx, \n opt_mesh,\n target['mvp'],\n target['campos'],\n lgt,\n target['resolution'], \n spp=target['spp'], \n msaa=True,\n background= target['background'] ,\n bsdf= bsdf,\n if_normal=if_normal,\n normal_rotate=target['normal_rotate'], \n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal = if_normal, mode = mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]])\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z])\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n\n if iteration <= self.FLAGS.coarse_iter:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_early, guidance.max_step_early+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n else:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 
2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_late, guidance.max_step_late+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n\n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [1, 3, H, W]\n latents = guidance.encode_imgs(pred_rgb_512)\n \n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states= text_embeddings, index=indexs, came_posfeat=came_posfeat)#.sample######################\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond)\n \n if guidance.sds_weight_strategy == 0:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 1:\n w = 1 / (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 2:\n if iteration <= self.FLAGS.coarse_iter:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n else:\n w = 1 / (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w* (noise_pred -noise) \n grad = torch.nan_to_num(grad)\n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n \n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), (32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask2.shape, mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "obj", "path": "render/obj.py", "snippet": "def _find_mat(materials, name):\ndef load_obj(filename, clear_ks=True, mtl_override=None):\ndef 
write_obj(folder, mesh, save_material=True):" }, { "identifier": "material", "path": "render/material.py", "snippet": "class Material(torch.nn.Module):\n def __init__(self, mat_dict):\n def __contains__(self, key):\n def __getitem__(self, key):\n def __setitem__(self, key, val):\n def __delitem__(self, key):\n def keys(self):\ndef load_mtl(fn, clear_ks=True):\ndef save_mtl(fn, material):\ndef _upscale_replicate(x, full_res):\ndef merge_materials(materials, texcoords, tfaces, mfaces):" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B = y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "mesh", "path": "render/mesh.py", "snippet": "class Mesh:\n def __init__(self, v_pos=None, t_pos_idx=None, v_nrm=None, t_nrm_idx=None, v_tex=None, t_tex_idx=None, v_tng=None, t_tng_idx=None, material=None, base=None):\n def copy_none(self, other):\n def clone(self):\ndef load_mesh(filename, mtl_override=None):\ndef aabb(mesh):\ndef compute_edges(attr_idx, 
return_inverse=False):\ndef compute_edge_to_face_mapping(attr_idx, return_inverse=False):\ndef unit_size(mesh):\ndef center_by_reference(base_mesh, ref_aabb, scale):\ndef auto_normals(imesh):\ndef compute_tangents(imesh):" }, { "identifier": "texture", "path": "render/texture.py", "snippet": "class texture2d_mip(torch.autograd.Function):\nclass Texture2D(torch.nn.Module):\n def forward(ctx, texture):\n def backward(ctx, dout):\n def __init__(self, init, min_max=None):\n def sample(self, texc, texc_deriv, filter_mode='linear-mipmap-linear'):\n def getRes(self):\n def getChannels(self):\n def getMips(self):\n def clamp_(self):\n def normalize_(self):\ndef create_trainable(init, res=None, auto_mipmaps=True, min_max=None):\ndef srgb_to_rgb(texture):\ndef rgb_to_srgb(texture):\ndef _load_mip2D(fn, lambda_fn=None, channels=None):\ndef load_texture2D(fn, lambda_fn=None, channels=None):\ndef _save_mip2D(fn, mip, mipidx, lambda_fn):\ndef save_texture2D(fn, tex, lambda_fn=None):" }, { "identifier": "mlptexture", "path": "render/mlptexture.py", "snippet": "class _MLP(torch.nn.Module):\nclass MLPTexture3D(torch.nn.Module):\n def __init__(self, cfg, loss_scale=1.0):\n def forward(self, x):\n def _init_weights(m):\n def __init__(self, AABB, channels = 3, internal_dims = 32, hidden = 1, min_max = None):\n def sample(self, texc):\n def clamp_(self):\n def cleanup(self):" }, { "identifier": "light", "path": "render/light.py", "snippet": "class cubemap_mip(torch.autograd.Function):\nclass EnvironmentLight(torch.nn.Module):\n def forward(ctx, cubemap):\n def backward(ctx, dout):\n def __init__(self, base):\n def xfm(self, mtx):\n def clone(self):\n def clamp_(self, min=None, max=None):\n def get_mip(self, roughness):\n def build_mips(self, cutoff=0.99):\n def regularizer(self):\n def shade(self, gb_pos, gb_normal, kd, ks, view_pos, specular=True):\ndef _load_env_hdr(fn, scale=1.0):\ndef load_env(fn, scale=1.0):\ndef save_env_map(fn, light):\ndef create_trainable_env_rnd(base_res, scale=0.5, bias=0.25):\n LIGHT_MIN_RES = 16\n MIN_ROUGHNESS = 0.08\n MAX_ROUGHNESS = 0.5" }, { "identifier": "render", "path": "render/render.py", "snippet": "def interpolate(attr, rast, attr_idx, rast_db=None):\ndef shade(\n gb_pos,\n gb_geometric_normal,\n gb_normal,\n gb_tangent,\n gb_texc,\n gb_texc_deriv,\n view_pos,\n lgt,\n material,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_layer(\n rast,\n rast_deriv,\n mesh,\n view_pos,\n lgt,\n resolution,\n spp,\n msaa,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_mesh(\n ctx,\n mesh,\n mtx_in,\n view_pos,\n lgt,\n resolution,\n spp = 1,\n num_layers = 1,\n msaa = False,\n background = None, \n bsdf = None,\n if_normal = False,\n normal_rotate = None,\n mode = 'geometry_modeling',\n if_flip_the_normal = False,\n if_use_bump = False\n ):\n def prepare_input_vector(x):\n def composite_buffer(key, layers, background, antialias):\ndef render_uv(ctx, mesh, resolution, mlp_texture):\ndef uv_padding(image, hole_mask, padding = 2, uv_padding_block = 4):\ndef render_uv1(ctx, mesh, resolution, mlp_texture, uv_padding_block):" }, { "identifier": "StableDiffusion", "path": "sd_cglora.py", "snippet": "class StableDiffusion(nn.Module):\n def __init__(self, \n device, \n mode='geometry', \n text= '', \n add_directional_text= False, \n batch = 1, \n guidance_weight = 100, \n sds_weight_strategy = 0,\n early_time_step_range = [0.02, 0.5],\n late_time_step_range = [0.02, 0.5]):\n 
super().__init__()\n\n self.device = device\n self.mode = mode\n self.text= text\n self.add_directional_text = add_directional_text\n self.batch = batch \n print(f'[INFO] loading stable diffusion...')\n model_key = \"stabilityai/stable-diffusion-2-1-base\"\n self.vae = AutoencoderKL.from_pretrained(model_key, subfolder=\"vae\",torch_dtype=torch.float16).to(self.device)\n self.tokenizer = CLIPTokenizer.from_pretrained(model_key, subfolder=\"tokenizer\",torch_dtype=torch.float16)\n self.text_encoder = CLIPTextModel.from_pretrained(model_key, subfolder=\"text_encoder\",torch_dtype=torch.float16).to(self.device)\n self.unet = UNet2DConditionModel.from_pretrained(model_key, subfolder=\"unet\",torch_dtype=torch.float16).to(self.device)\n if is_xformers_available():\n self.unet.enable_xformers_memory_efficient_attention()\n self.negative_text = ''\n if add_directional_text:\n self.text_z = []\n self.uncond_z = []\n self.index = []\n self.uncond_index = []\n for d in ['front', 'side', 'back', 'side']:\n text = f\"{self.text}, {d} view\"\n # text = f\"{d} view of {self.text}\"\n negative_text = f\"{self.negative_text}\"\n # if d == 'back': negative_text += \"face\"\n text_z, index = self.get_text_embeds([text], batch = 1)\n uncond_z, uncond_index =self.get_uncond_embeds([negative_text], batch = 1)\n self.text_z.append(text_z)\n self.uncond_z.append(uncond_z)\n self.index.append(index)\n self.uncond_index.append(uncond_index)\n self.text_z = torch.cat(self.text_z)\n self.uncond_z = torch.cat(self.uncond_z)\n self.index = torch.cat(self.index)\n self.uncond_index = torch.cat(self.uncond_index)\n else: \n self.text_z, self.index = self.get_text_embeds([self.text], batch = self.batch)\n self.uncond_z =self.get_uncond_embeds([self.negative_text], batch = self.batch)\n # del self.text_encoder\n self.scheduler = DPMSolverMultistepScheduler.from_pretrained(model_key, subfolder=\"scheduler\", torch_dtype=torch.float16)\n self.num_train_timesteps = self.scheduler.config.num_train_timesteps\n self.min_step_early = int(self.num_train_timesteps * early_time_step_range[0])\n self.max_step_early = int(self.num_train_timesteps * early_time_step_range[1])\n self.min_step_late = int(self.num_train_timesteps * late_time_step_range[0])\n self.max_step_late = int(self.num_train_timesteps * late_time_step_range[1])\n self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience\n self.guidance_weight = guidance_weight\n self.sds_weight_strategy = sds_weight_strategy\n print(f'[INFO] loaded stable diffusion!')\n\n for p in self.parameters():\n p.requires_grad_(False)\n self.unet_lora_params, self.names = inject_trainable_cglora(self.unet) # This will\n\n\n def get_text_embeds_global(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n \n global_embedding = text_embeddings[:,text_input['input_ids'].argmax(dim=-1),:].squeeze()\n \n return global_embedding\n\n\n def get_text_embeds(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n 
###################################################################\n index = text_input['input_ids'].argmax(dim=-1)\n #global_embedding = text_embeddings[:, index, :].squeeze()\n ##################################################################\n \n return text_embeddings, index\n \n def get_uncond_embeds(self, negative_prompt, batch):\n uncond_input = self.tokenizer(negative_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt')\n with torch.no_grad():\n uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]\n \n if batch > 1:\n uncond_embeddings = uncond_embeddings.repeat(batch, 1, 1)\n ###################################################################\n index = uncond_input['input_ids'].argmax(dim=-1)\n # global_embedding = uncond_embeddings[:, index, :].squeeze()\n ##################################################################\n return uncond_embeddings,index\n\n def encode_imgs(self, imgs):\n # imgs: [B, 3, H, W]\n if self.mode == 'appearance_modeling':\n \n imgs = 2 * imgs - 1\n\n posterior = self.vae.encode(imgs).latent_dist\n latents = posterior.sample() * 0.18215\n\n return latents" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B 
= y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "Video", "path": "render/video.py", "snippet": "class Video():\n def __init__(self, path, name='video_log.mp4', mode='I', fps=30, codec='libx264', bitrate='16M') -> None:\n \n if path[-1] != \"/\":\n path += \"/\"\n \n self.writer = imageio.get_writer(path+name, mode=mode, fps=fps, codec=codec, bitrate=bitrate)\n \n def ready_image(self, image, write_video=True):\n # assuming channels last - as renderer returns it\n if len(image.shape) == 4: \n image = image.squeeze(0)[..., :3].detach().cpu().numpy()\n else:\n image = image[..., :3].detach().cpu().numpy()\n\n image = np.clip(np.rint(image*255.0), 0, 255).astype(np.uint8)\n\n if write_video:\n self.writer.append_data(image)\n\n return image\n\n def close(self):\n self.writer.close()" } ]
import os
import time
import argparse
import json
import math
import numpy as np
import torch
import nvdiffrast.torch as dr
import itertools
import xatlas
import open3d as o3d
import random
import imageio
import os.path as osp
import pickle
from dataset.dataset_mesh import DatasetMesh
from dataset.dataset_mesh import get_camera_params
from geometry.dmtet_x_dreamer import DMTetGeometry
from geometry.dlmesh_x_dreamer import DLMesh
from render import obj
from render import material
from render import util
from render import mesh
from render import texture
from render import mlptexture
from render import light
from render import render
from sd_cglora import StableDiffusion
from tqdm import tqdm
from render import util
from render.video import Video
14,969
if 'kd' in result_dict.keys(): save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0: video = Video(FLAGS.out_dir) if FLAGS.local_rank == 0: dataloader_train = tqdm(dataloader_train) for it, target in enumerate(dataloader_train): # Mix randomized background into dataset image target = prepare_batch(target, FLAGS.train_background) # Show/save image before training step (want to get correct rendering of input) if FLAGS.local_rank == 0: save_image = FLAGS.save_interval and (it % FLAGS.save_interval == 0) save_video = FLAGS.video_interval and (it % FLAGS.video_interval == 0) if save_image: result_image, result_dict 
= validate_itr(glctx, prepare_batch(next(v_it), FLAGS.train_background), geometry, opt_material, lgt, FLAGS) #prepare_batch(next(v_it), FLAGS.background) np_result_image = result_image.detach().cpu().numpy() util.save_image(FLAGS.out_dir + '/' + ('img_%s_%06d.png' % (FLAGS.mode, img_cnt)), np_result_image) util.save_image(FLAGS.out_dir + '/' + ('mask_%s_%06d.png' % (FLAGS.mode, img_cnt)), result_dict['mask'].detach().cpu().numpy()) img_cnt = img_cnt+1 if save_video: with torch.no_grad():
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh) mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal']) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh @torch.no_grad() def xatlas_uvmap1(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) new_mesh = mesh.Mesh( base=eval_mesh) mask, kd, ks, normal = render.render_uv1(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'], FLAGS.uv_padding_block) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh 
############################################################################### # Utility functions for material ############################################################################### def get_normalize_mesh(pro_path): mesh = o3d.io.read_triangle_mesh(pro_path) vertices = np.asarray(mesh.vertices) shift = np.mean(vertices,axis=0) scale = np.max(np.linalg.norm(vertices-shift, ord=2, axis=1)) vertices = (vertices-shift) / scale mesh.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) return mesh def initial_guness_material(geometry, mlp, FLAGS, init_mat=None): # ipdb.set_trace(()) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') if mlp: mlp_min = torch.cat((kd_min[0:3], ks_min, nrm_min), dim=0) mlp_max = torch.cat((kd_max[0:3], ks_max, nrm_max), dim=0) mlp_map_opt = mlptexture.MLPTexture3D(geometry.getAABB(), channels=9, min_max=[mlp_min, mlp_max]) mat = material.Material({'kd_ks_normal' : mlp_map_opt}) else: # Setup Kd (albedo) and Ks (x, roughness, metalness) textures if FLAGS.random_textures or init_mat is None: num_channels = 4 if FLAGS.layers > 1 else 3 kd_init = torch.rand(size=FLAGS.texture_res + [num_channels], device='cuda') * (kd_max - kd_min)[None, None, 0:num_channels] + kd_min[None, None, 0:num_channels] kd_map_opt = texture.create_trainable(kd_init , FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ksR = np.random.uniform(size=FLAGS.texture_res + [1], low=0.0, high=0.01) ksG = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[1].cpu(), high=ks_max[1].cpu()) ksB = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[2].cpu(), high=ks_max[2].cpu()) ks_map_opt = texture.create_trainable(np.concatenate((ksR, ksG, ksB), axis=2), FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) else: kd_map_opt = texture.create_trainable(init_mat['kd'], FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ks_map_opt = texture.create_trainable(init_mat['ks'], FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) # Setup normal map if FLAGS.random_textures or init_mat is None or 'normal' not in init_mat: normal_map_opt = texture.create_trainable(np.array([0, 0, 1]), FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) else: normal_map_opt = texture.create_trainable(init_mat['normal'], FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) mat = material.Material({ 'kd' : kd_map_opt, 'ks' : ks_map_opt, 'normal' : normal_map_opt }) if init_mat is not None: mat['bsdf'] = init_mat['bsdf'] else: mat['bsdf'] = 'pbr' return mat ############################################################################### # Validation & testing ############################################################################### # @torch.no_grad() def validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight = None): result_dict = {} with torch.no_grad(): if FLAGS.mode == 'appearance_modeling': with torch.no_grad(): lgt.build_mips() if FLAGS.camera_space_light: lgt.xfm(target['mv']) if relight != None: relight.build_mips() buffers = geometry.render(glctx, target, lgt, opt_material, if_use_bump = FLAGS.if_use_bump) result_dict['shaded'] = buffers['shaded'][0, ..., 
0:3] result_dict['shaded'] = util.rgb_to_srgb(result_dict['shaded']) if relight != None: result_dict['relight'] = geometry.render(glctx, target, relight, opt_material, if_use_bump = FLAGS.if_use_bump)['shaded'][0, ..., 0:3] result_dict['relight'] = util.rgb_to_srgb(result_dict['relight']) result_dict['mask'] = (buffers['shaded'][0, ..., 3:4]) result_image = result_dict['shaded'] if FLAGS.display is not None : # white_bg = torch.ones_like(target['background']) for layer in FLAGS.display: if 'latlong' in layer and layer['latlong']: if isinstance(lgt, light.EnvironmentLight): result_dict['light_image'] = util.cubemap_to_latlong(lgt.base, FLAGS.display_res) result_image = torch.cat([result_image, result_dict['light_image']], axis=1) elif 'bsdf' in layer: buffers = geometry.render(glctx, target, lgt, opt_material, bsdf=layer['bsdf'], if_use_bump = FLAGS.if_use_bump) if layer['bsdf'] == 'kd': result_dict[layer['bsdf']] = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) elif layer['bsdf'] == 'normal': result_dict[layer['bsdf']] = (buffers['shaded'][0, ..., 0:3] + 1) * 0.5 else: result_dict[layer['bsdf']] = buffers['shaded'][0, ..., 0:3] result_image = torch.cat([result_image, result_dict[layer['bsdf']]], axis=1) return result_image, result_dict def save_gif(dir,fps): imgpath = dir frames = [] for idx in sorted(os.listdir(imgpath)): img = osp.join(imgpath,idx) frames.append(imageio.imread(img)) imageio.mimsave(os.path.join(dir, 'eval.gif'),frames,'GIF',duration=1/fps,loop=0) @torch.no_grad() def validate(glctx, geometry, opt_material, lgt, dataset_validate, out_dir, FLAGS, relight= None): # ============================================================================================== # Validation loop # ============================================================================================== mse_values = [] psnr_values = [] dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_validate.collate) os.makedirs(out_dir, exist_ok=True) shaded_dir = os.path.join(out_dir, "shaded") relight_dir = os.path.join(out_dir, "relight") kd_dir = os.path.join(out_dir, "kd") ks_dir = os.path.join(out_dir, "ks") normal_dir = os.path.join(out_dir, "normal") mask_dir = os.path.join(out_dir, "mask") os.makedirs(shaded_dir, exist_ok=True) os.makedirs(relight_dir, exist_ok=True) os.makedirs(kd_dir, exist_ok=True) os.makedirs(ks_dir, exist_ok=True) os.makedirs(normal_dir, exist_ok=True) os.makedirs(mask_dir, exist_ok=True) print("Running validation") dataloader_validate = tqdm(dataloader_validate) for it, target in enumerate(dataloader_validate): # Mix validation background target = prepare_batch(target, 'white') result_image, result_dict = validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight) for k in result_dict.keys(): np_img = result_dict[k].detach().cpu().numpy() if k == 'shaded': util.save_image(shaded_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'relight': util.save_image(relight_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'kd': util.save_image(kd_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'ks': util.save_image(ks_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'normal': util.save_image(normal_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'mask': util.save_image(mask_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) if 'shaded' in result_dict.keys(): save_gif(shaded_dir,30) if 'relight' in result_dict.keys(): save_gif(relight_dir,30) if 'kd' in result_dict.keys(): 
save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0: video = Video(FLAGS.out_dir) if FLAGS.local_rank == 0: dataloader_train = tqdm(dataloader_train) for it, target in enumerate(dataloader_train): # Mix randomized background into dataset image target = prepare_batch(target, FLAGS.train_background) # Show/save image before training step (want to get correct rendering of input) if FLAGS.local_rank == 0: save_image = FLAGS.save_interval and (it % FLAGS.save_interval == 0) save_video = FLAGS.video_interval and (it % FLAGS.video_interval == 0) if save_image: result_image, result_dict = validate_itr(glctx, 
prepare_batch(next(v_it), FLAGS.train_background), geometry, opt_material, lgt, FLAGS) #prepare_batch(next(v_it), FLAGS.background) np_result_image = result_image.detach().cpu().numpy() util.save_image(FLAGS.out_dir + '/' + ('img_%s_%06d.png' % (FLAGS.mode, img_cnt)), np_result_image) util.save_image(FLAGS.out_dir + '/' + ('mask_%s_%06d.png' % (FLAGS.mode, img_cnt)), result_dict['mask'].detach().cpu().numpy()) img_cnt = img_cnt+1 if save_video: with torch.no_grad():
params = get_camera_params(
1
2023-11-27 13:44:01+00:00
24k
zhenzhiwang/intercontrol
utils/model_util.py
[ { "identifier": "ControlGaussianDiffusion", "path": "diffusion/control_diffusion.py", "snippet": "class ControlGaussianDiffusion(SpacedDiffusion):\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = th.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = th.tensor(self.mean, dtype=data.dtype, device=data.device, requires_grad=False)\n output = th.add(th.mul(data, std), mean)\n return output\n \n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n \"\"\"\n overrides q_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n if noise is None:\n noise = th.randn_like(x_start)\n assert noise.shape == x_start.shape\n\n bs, feat, _, frames = noise.shape\n noise *= 1. #- model_kwargs['y']['inpainting_mask']\n\n return (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def global_joint_bfgs_optimize(self, x, model_kwargs=None):\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.humanml_to_global_joint(x)\n cond_joint = model_kwargs['y']['global_joint']\n mask = model_kwargs['y']['global_joint_mask']\n pred_joint = th.masked_select(pred_joint, mask.bool())\n cond_joint = th.masked_select(cond_joint, mask.bool())\n assert pred_joint.shape == cond_joint.shape, f\"pred_joint: {pred_joint.shape}, cond_joint: {cond_joint.shape}\"\n loss = self.mse_loss(pred_joint, cond_joint)\n return loss\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert pred_joint.shape[1] == 1\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n return pred_joint\n \n def global_joint_position_conditioning(self, x, model_kwargs=None):\n n_joints = 22 if x.shape[1] == 263 else 21\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n #pred_joint.requires_grad = True\n assert pred_joint.shape == model_kwargs['y']['global_joint'].shape == model_kwargs['y']['global_joint_mask'].shape, f\"pred_joint: {pred_joint.shape}, global_joint: {model_kwargs['y']['global_joint'].shape}, global_joint_mask: {model_kwargs['y']['global_joint_mask'].shape}\"\n loss = self.global_joint_condition_loss(pred_joint, model_kwargs['y']['global_joint'], model_kwargs['y']['global_joint_mask'])\n diff_scale = ((pred_joint.clamp(min=1e-4) - model_kwargs['y']['global_joint'].clamp(min=1e-4)).abs() / model_kwargs['y']['global_joint'].clamp(min=1e-4).abs()).mean().item()\n #loss.requires_grad = True\n gradient = th.autograd.grad(loss, x, \n grad_outputs=th.ones_like(loss),\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return gradient.clone().detach(), loss.item(), diff_scale\n\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n use_posterior=False,\n ):\n \"\"\"\n overrides p_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n #assert 
use_posterior == False\n p_mean_variance_func = self.p_mean_variance_bfgs_posterior if use_posterior else self.p_mean_variance_bfgs_x0\n out = p_mean_variance_func(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n k_first = self.bfgs_times_first,\n k_last = self.bfgs_times_last,\n )\n \n noise = th.randn_like(x)\n if const_noise:\n noise = noise[[0]].repeat(x.shape[0], 1, 1, 1)\n\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n \n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n \n def condition_mean_with_grad(self, cond_fn, x_mean, x_var, t, strength, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n condition on y.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n with th.enable_grad():\n x_mean = x_mean.clone().detach().requires_grad_(True)\n gradient, loss_value, diff_scale = cond_fn(x_mean, model_kwargs) # p_mean_var[\"mean\"]\n gradient_guidance = - strength * gradient.float() # x_var.clamp(min = 0.01) \n new_mean = (x_mean + gradient_guidance).clone().detach()\n return new_mean, loss_value, gradient_guidance.clone().detach().abs().cpu(), x_mean.clone().detach().abs().cpu(), diff_scale\n\n\n def condition_mean_bfgs(self, x_mean, num_condition, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n \n with th.enable_grad():\n x_mean = x_mean.clone().detach().contiguous().requires_grad_(True)\n def closure():\n lbfgs.zero_grad()\n objective = self.global_joint_bfgs_optimize(x_mean, model_kwargs)\n objective.backward()\n return objective\n lbfgs = optim.LBFGS([x_mean],\n history_size=10, \n max_iter=4, \n line_search_fn=\"strong_wolfe\")\n for _ in range(num_condition):\n lbfgs.step(closure)\n #loss_value = self.global_joint_bfgs_optimize(x_mean, model_kwargs).item()\n return x_mean #, loss_value\n\n def p_mean_variance_bfgs_x0(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. 
This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_output = self.condition_mean_bfgs(model_output, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n \n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def p_mean_variance_bfgs_posterior(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] 
tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_mean = self.condition_mean_bfgs(model_mean, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def training_losses(self, model, x_start, 
t, model_kwargs=None, noise=None, dataset=None,\n use_posterior = True,\n k_first = 1,\n k_last = 10,\n t_threshold = 10,):\n \"\"\"\n Compute training losses for a single timestep.\n\n :param model: the model to evaluate loss on.\n :param x_start: the [N x C x ...] tensor of inputs.\n :param t: a batch of timestep indices.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :param noise: if specified, the specific Gaussian noise to try to remove.\n :return: a dict with the key \"loss\" containing a tensor of shape [N].\n Some mean or variance settings may also have other keys.\n \"\"\"\n\n # enc = model.model._modules['module']\n model = self._wrap_model(model)\n \n enc = model.model\n mask = model_kwargs['y']['mask']\n get_xyz = lambda sample: enc.rot2xyz(sample, mask=None, pose_rep=enc.pose_rep, translation=enc.translation,\n glob=enc.glob,\n # jointstype='vertices', # 3.4 iter/sec # USED ALSO IN MotionCLIP\n jointstype='smpl', # 3.4 iter/sec\n vertstrans=False)\n\n if model_kwargs is None:\n model_kwargs = {}\n if noise is None:\n noise = th.randn_like(x_start)\n x_t = self.q_sample(x_start, t, noise=noise, model_kwargs=model_kwargs)\n \n #assert k_first == 1, \"k_first must be 1, {}\".format(k_first)\n #assert k_last == 10, \"k_last must be 10, {}\".format(k_last)\n assert use_posterior == True, \"use_posterior must be True, {}\".format(use_posterior)\n if use_posterior:\n '''\n # loss-guided condition in training time\n if t[0] >= t_threshold:\n assert (t >= t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n num_condition = k_first # else k_last\n else:\n num_condition = k_last\n assert (t < t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n '''\n num_condition = k_first\n x_t = self.condition_mean_bfgs(x_t, num_condition, model_kwargs=model_kwargs)\n\n terms = {}\n if self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:\n model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)\n\n target = {\n ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )[0],\n ModelMeanType.START_X: x_start,\n ModelMeanType.EPSILON: noise,\n }[self.model_mean_type]\n\n assert model_output.shape == target.shape == x_start.shape, \"model_output {}, target {}, x_start {}\".format(model_output.shape ,target.shape ,x_start.shape) # [bs, njoints, nfeats, nframes]\n\n terms[\"rot_mse\"] = self.masked_l2(target, model_output, mask) # mean_flat(rot_mse)\n\n terms[\"loss\"] = terms[\"rot_mse\"] + terms.get('vb', 0.) 
+\\\n (self.lambda_vel * terms.get('vel_mse', 0.)) +\\\n (self.lambda_rcxyz * terms.get('rcxyz_mse', 0.)) + \\\n (self.lambda_fc * terms.get('fc', 0.))\n else:\n raise NotImplementedError(self.loss_type)\n\n return terms" }, { "identifier": "wrap_model", "path": "model/cfg_sampler.py", "snippet": "def wrap_model(model, args):\n if args.guidance_param not in [0., 1.]:\n return ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler\n elif args.guidance_param == 0:\n return UnconditionedModel(model)\n else:\n return model" }, { "identifier": "MDM", "path": "model/mdm.py", "snippet": "class MDM(nn.Module):\n def __init__(self, modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n ablation=None, activation=\"gelu\", legacy=False, data_rep='rot6d', dataset='amass', clip_dim=512,\n arch='trans_enc', emb_trans_dec=False, clip_version=None, **kargs):\n super().__init__()\n\n self.legacy = legacy\n self.modeltype = modeltype\n self.njoints = njoints\n self.nfeats = nfeats\n self.num_actions = num_actions\n self.data_rep = data_rep\n self.dataset = dataset\n\n self.pose_rep = pose_rep\n self.glob = glob\n self.glob_rot = glob_rot\n self.translation = translation\n\n self.latent_dim = latent_dim\n\n self.ff_size = ff_size\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.dropout = dropout\n\n self.ablation = ablation\n self.activation = activation\n self.clip_dim = clip_dim\n self.action_emb = kargs.get('action_emb', None)\n\n self.input_feats = self.njoints * self.nfeats\n\n self.normalize_output = kargs.get('normalize_encoder_output', False)\n\n self.cond_mode = kargs.get('cond_mode', 'no_cond')\n self.cond_mask_prob = kargs.get('cond_mask_prob', 0.)\n self.arch = arch\n self.gru_emb_dim = self.latent_dim if self.arch == 'gru' else 0\n self.input_process = InputProcess(self.data_rep, self.input_feats+self.gru_emb_dim, self.latent_dim)\n\n self.sequence_pos_encoder = PositionalEncoding(self.latent_dim, self.dropout)\n self.emb_trans_dec = emb_trans_dec\n\n if self.arch == 'trans_enc':\n print(\"TRANS_ENC init\")\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation)\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n elif self.arch == 'trans_dec':\n print(\"TRANS_DEC init\")\n seqTransDecoderLayer = nn.TransformerDecoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=activation)\n self.seqTransDecoder = nn.TransformerDecoder(seqTransDecoderLayer,\n num_layers=self.num_layers)\n elif self.arch == 'gru':\n print(\"GRU init\")\n self.gru = nn.GRU(self.latent_dim, self.latent_dim, num_layers=self.num_layers, batch_first=True)\n else:\n raise ValueError('Please choose correct architecture [trans_enc, trans_dec, gru]')\n\n self.embed_timestep = TimestepEmbedder(self.latent_dim, self.sequence_pos_encoder)\n\n if self.cond_mode != 'no_cond':\n if 'text' in self.cond_mode:\n self.embed_text = nn.Linear(self.clip_dim, self.latent_dim)\n print('EMBED TEXT')\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n if 'action' in self.cond_mode:\n self.embed_action = EmbedAction(self.num_actions, self.latent_dim)\n print('EMBED ACTION')\n\n 
self.output_process = OutputProcess(self.data_rep, self.input_feats, self.latent_dim, self.njoints,\n self.nfeats)\n\n self.rot2xyz = Rotation2xyz(device='cpu', dataset=self.dataset)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_mask_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_mask_prob).view(bs, 1) # 1-> use null_cond, 0-> use real cond\n return cond * (1. - mask)\n else:\n return cond\n\n def encode_text(self, raw_text):\n # raw_text - list (batch_size length) of strings with input text prompts\n device = next(self.parameters()).device\n max_text_len = 20 if self.dataset in ['humanml', 'kit'] else None # Specific hardcoding for humanml dataset\n if max_text_len is not None:\n default_context_length = 77\n context_length = max_text_len + 2 # start_token + 20 + end_token\n assert context_length < default_context_length\n texts = clip.tokenize(raw_text, context_length=context_length, truncate=True).to(device) # [bs, context_length] # if n_tokens > context_length -> will truncate\n # print('texts', texts.shape)\n zero_pad = torch.zeros([texts.shape[0], default_context_length-context_length], dtype=texts.dtype, device=texts.device)\n texts = torch.cat([texts, zero_pad], dim=1)\n # print('texts after pad', texts.shape, texts)\n else:\n texts = clip.tokenize(raw_text, truncate=True).to(device) # [bs, context_length] # if n_tokens > 77 -> will truncate\n return self.clip_model.encode_text(texts).float()\n\n def forward(self, x, timesteps, y=None):\n \"\"\"\n x: [batch_size, njoints, nfeats, max_frames], denoted x_t in the paper\n timesteps: [batch_size] (int)\n \"\"\"\n bs, njoints, nfeats, nframes = x.shape\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n if self.arch == 'gru':\n x_reshaped = x.reshape(bs, njoints*nfeats, 1, nframes)\n emb_gru = emb.repeat(nframes, 1, 1) #[#frames, bs, d]\n emb_gru = emb_gru.permute(1, 2, 0) #[bs, d, #frames]\n emb_gru = emb_gru.reshape(bs, self.latent_dim, 1, nframes) #[bs, d, 1, #frames]\n x = torch.cat((x_reshaped, emb_gru), axis=1) #[bs, d+joints*feat, 1, #frames]\n\n x = self.input_process(x)\n\n if self.arch == 'trans_enc':\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n output = self.seqTransEncoder(xseq)[1:] # , src_key_padding_mask=~maskseq) # [seqlen, bs, d]\n\n elif self.arch == 'trans_dec':\n if self.emb_trans_dec:\n xseq = torch.cat((emb, x), axis=0)\n else:\n xseq = x\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n if 
self.emb_trans_dec:\n output = self.seqTransDecoder(tgt=xseq, memory=emb)[1:] # [seqlen, bs, d] # FIXME - maybe add a causal mask\n else:\n output = self.seqTransDecoder(tgt=xseq, memory=emb)\n elif self.arch == 'gru':\n xseq = x\n xseq = self.sequence_pos_encoder(xseq) # [seqlen, bs, d]\n output, _ = self.gru(xseq)\n\n output = self.output_process(output) # [bs, njoints, nfeats, nframes]\n return output\n\n\n def _apply(self, fn):\n super()._apply(fn)\n self.rot2xyz.smpl_model._apply(fn)\n\n\n def train(self, *args, **kwargs):\n super().train(*args, **kwargs)\n self.rot2xyz.smpl_model.train(*args, **kwargs)" }, { "identifier": "ControlMDM", "path": "model/ControlMDM.py", "snippet": "class ControlMDM(MDM):\n\n def __init__(self, modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n ablation=None, activation=\"gelu\", legacy=False, data_rep='rot6d', dataset='amass', clip_dim=512,\n arch='trans_enc', emb_trans_dec=False, clip_version=None, args=None, **kargs):\n\n super(ControlMDM, self).__init__(modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim, ff_size, num_layers, num_heads, dropout,\n ablation, activation, legacy, data_rep, dataset, clip_dim,\n arch, emb_trans_dec, clip_version, **kargs)\n self.args = args\n self.num_layers = num_layers\n self.multi_person = args.multi_person\n self.upper_orientation_index = [0, 16, 17] # root, l_shoulder, r_shoulder\n self.lower_orientation_index = [0, 1, 2] # root, l_hip, r_hip\n\n # linear layers init with zeros\n if self.dataset == 'kit':\n self.first_zero_linear = nn.Linear(21*3*2 + 2*3, self.latent_dim)\n elif self.dataset == 'humanml':\n self.first_zero_linear = nn.Linear(22*3*2 + 2*3, self.latent_dim)\n else:\n raise NotImplementedError('Supporting only kit and humanml dataset, got {}'.format(self.dataset))\n \n nn.init.zeros_(self.first_zero_linear.weight)\n nn.init.zeros_(self.first_zero_linear.bias)\n self.mid_zero_linear = nn.ModuleList(\n [nn.Linear(self.latent_dim, self.latent_dim) for _ in range(self.num_layers)])\n for m in self.mid_zero_linear:\n nn.init.zeros_(m.weight)\n nn.init.zeros_(m.bias)\n\n if self.arch == 'trans_enc':\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation)\n del self.seqTransEncoder\n self.seqTransEncoder_mdm = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n self.seqTransEncoder_control = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n else:\n raise ValueError('Supporting only trans_enc arch.')\n\n self.freeze_block(self.input_process)\n self.freeze_block(self.sequence_pos_encoder)\n self.freeze_block(self.seqTransEncoder_mdm)\n self.freeze_block(self.embed_timestep)\n if 'text' in self.cond_mode:\n self.freeze_block(self.embed_text)\n self.freeze_block(self.output_process)\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = torch.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = torch.tensor(self.mean, dtype=data.dtype, device=data.device, requires_grad=False)\n output = torch.add(torch.mul(data, std), mean)\n return output\n \n def compute_triangle_normals(self, triangles):\n # Compute the vectors from the first point to the other two points\n v1 = triangles[:,:, 1] - 
triangles[:, :,0]\n v2 = triangles[:,:, 2] - triangles[:,:,0]\n\n # Compute the cross product of v1 and v2 to get the normal vectors\n normals = torch.cross(v2, v1, dim=-1)\n\n # Normalize the normal vectors to unit length\n normals = nn.functional.normalize(normals, dim=-1)\n return normals\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n curr_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert curr_joint.shape[1] == 1\n curr_joint = recover_from_ric(curr_joint, n_joints)\n curr_joint = curr_joint.view(-1, *curr_joint.shape[2:]).permute(0, 2, 3, 1)\n # change root positions for multi-person purpose\n if self.multi_person:\n curr_joint[1::2, :,2,:] *= -1\n curr_joint[1::2, :,0,:] *= -1\n curr_joint[1::2, :,2,:] += 2\n\n # more than 3 people\n #curr_joint[1, :,2,:] *= -1\n #curr_joint[1, :,0,:] *= -1\n #curr_joint[1, :,2,:] += 2\n #curr_joint[2, :,0,:] += 1\n return curr_joint\n\n def forward(self, x, timesteps, y=None):\n bs, njoints, nfeats, seqlen = x.shape\n control_bs, n_global_joints, xyz_dim, control_frames = y['global_joint'].shape\n assert bs == control_bs and seqlen == control_frames, \"bs {} != {} or seqlen {} != {}\".format(bs, control_bs, seqlen, control_frames)\n assert xyz_dim ==3, \"xyz_dim {} != 3\".format(xyz_dim)\n # prepare global joints for controlmdm\n curr_joint = self.humanml_to_global_joint(x).clone().detach() # [bs, njoints, 3, seqlen]\n curr_joint.requires_grad = False\n\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n\n # controlmdm\n # orientation\n upper_triangles = curr_joint[:,self.upper_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n lower_triangles = curr_joint[:,self.lower_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n upper_orientation = self.compute_triangle_normals(upper_triangles) # [seqlen, bs, 3]\n lower_orientation = self.compute_triangle_normals(lower_triangles) # [seqlen, bs, 3]\n\n # relative position to joint\n '''\n relative_position = torch.zeros_like(curr_joint, device = xseq.device, dtype=torch.float32) # [bs, njoints, 3, seqlen]\n relative_position[1::2,:,:,:] = ((y['global_joint'][::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,1::2,:,:].unsqueeze(2))*y['global_joint_mask'][::2,:,:,:].bool().float()).float().sum(1)\n relative_position[::2,:,:,:] = ((y['global_joint'][1::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,::2,:,:].unsqueeze(2))*y['global_joint_mask'][1::2,:,:,:].bool().float()).float().sum(1)\n '''\n relative_position = ((y['global_joint'].float() - curr_joint)*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_position = relative_position.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n\n # relative position to root\n relative_root = ((y['global_joint'].float() - curr_joint[:,[0],:,:])*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_root = 
relative_root.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n global_joint_feat = torch.cat((relative_position, relative_root, upper_orientation, lower_orientation), axis=-1) # [seqlen, bs, 22*3 *2 +3 +3]\n \n global_joint_feat = self.first_zero_linear(global_joint_feat) # [seqlen, bs, d]\n control_input = xseq + torch.cat((torch.zeros_like(emb, device = xseq.device, dtype=torch.float32), global_joint_feat), axis=0) # [seqlen+1, bs, d]\n control_output_list = self.seqTransEncoder_control.return_all_layers(control_input) # [seqlen+1, bs, d]\n for i in range(self.num_layers):\n control_output_list[i] = self.mid_zero_linear[i](control_output_list[i])\n \n output = self.seqTransEncoder_mdm.forward_with_condition(xseq, control_output_list)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output\n\n def trainable_parameters(self):\n return [p for name, p in self.named_parameters() if p.requires_grad]\n # return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n \n def trainable_parameter_names(self):\n return [name for name, p in self.named_parameters() if p.requires_grad]\n\n def freeze_block(self, block):\n block.eval()\n for p in block.parameters():\n p.requires_grad = False\n\n def unfreeze_block(self, block):\n block.train()\n for p in block.parameters():\n p.requires_grad = True\n \n def forward_without_control(self, x, timesteps, y=None): #\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n output = self.seqTransEncoder_mdm(xseq)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output" }, { "identifier": "gaussian_diffusion", "path": "diffusion/gaussian_diffusion.py", "snippet": "def get_named_beta_schedule(schedule_name, num_diffusion_timesteps, scale_betas=1.):\ndef betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n def is_vb(self):\n def __init__(\n self,\n *,\n betas,\n model_mean_type,\n model_var_type,\n loss_type,\n rescale_timesteps=False,\n lambda_rcxyz=0.,\n lambda_vel=0.,\n lambda_pose=1.,\n lambda_orient=1.,\n lambda_loc=1.,\n data_rep='rot6d',\n lambda_root_vel=0.,\n lambda_vel_rcxyz=0.,\n lambda_fc=0.,\n batch_size=32,\n multi_train_mode=None,\n bfgs_times_first = 5,\n bfgs_times_last = 10,\n bfgs_interval = 1,\n ):\n def masked_l2(self, a, b, mask):\n def q_mean_variance(self, x_start, t):\n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n def q_posterior_mean_variance(self, x_start, x_t, t):\n def p_mean_variance(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None\n ):\n def process_xstart(x):\n def _predict_xstart_from_eps(self, x_t, t, eps):\n def _predict_xstart_from_xprev(self, x_t, t, xprev):\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n def _scale_timesteps(self, t):\n def condition_mean(self, cond_fn, p_mean_var, x, t, 
model_kwargs=None):\n def condition_mean_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n ):\n def p_sample_with_grad(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n ):\n def p_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n dump_steps=None,\n const_noise=False,\n arb_len=False,\n use_posterior = False,\n ):\n def p_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n predict_two_person=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n const_noise=False,\n use_posterior = False,\n ):\n def p_sample_multi(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n ):\n def handle_sample(_x, _out, _cond_fn, _t, _const_noise):\n def ddim_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_sample_with_grad(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_reverse_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n dump_steps=None,\n const_noise=False,\n ):\n def ddim_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n ):\n def plms_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n cond_fn_with_grad=False,\n order=2,\n old_out=None,\n ):\n def get_model_output(x, t):\n def plms_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n order=2,\n ):\n def plms_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n skip_timesteps=0,\n init_image=None,\n randomize_class=False,\n cond_fn_with_grad=False,\n order=2,\n ):\n def _vb_terms_bpd(\n self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None\n ):\n def training_losses(self, model, x_start, t, model_kwargs=None, noise=None, dataset=None):\n def fc_loss_rot_repr(self, gt_xyz, pred_xyz, mask):\n def 
to_np_cpu(x):\n def foot_contact_loss_humanml3d(self, target, model_output):\n def velocity_consistency_loss_humanml3d(self, target, model_output):\n def _prior_bpd(self, x_start):\n def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):\ndef _extract_into_tensor(arr, timesteps, broadcast_shape):\nclass ModelMeanType(enum.Enum):\nclass ModelVarType(enum.Enum):\nclass LossType(enum.Enum):\nclass GaussianDiffusion:\n PREVIOUS_X = enum.auto() # the model predicts x_{t-1}\n START_X = enum.auto() # the model predicts x_0\n EPSILON = enum.auto() # the model predicts epsilon\n LEARNED = enum.auto()\n FIXED_SMALL = enum.auto()\n FIXED_LARGE = enum.auto()\n LEARNED_RANGE = enum.auto()\n MSE = enum.auto() # use raw MSE loss (and KL when learning variances)\n RESCALED_MSE = (\n enum.auto()\n ) # use raw MSE loss (with RESCALED_KL when learning variances)\n KL = enum.auto() # use the variational lower-bound\n RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB\n B, C = x.shape[:2]\n B, C = x_t.shape[:2]" }, { "identifier": "SpacedDiffusion", "path": "diffusion/respace.py", "snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t" }, { "identifier": "space_timesteps", "path": "diffusion/respace.py", "snippet": "def space_timesteps(num_timesteps, section_counts):\n \"\"\"\n Create a list of timesteps to use from an original diffusion process,\n given the number of timesteps we want to take from equally-sized portions\n of the original process.\n\n For example, if there's 300 timesteps and the section counts are [10,15,20]\n then the first 100 timesteps are strided to be 10 timesteps, the second 100\n are strided to be 15 timesteps, and the final 100 are strided to be 20.\n\n If the stride is a string starting with \"ddim\", then the fixed 
striding\n from the DDIM paper is used, and only one section is allowed.\n\n :param num_timesteps: the number of diffusion steps in the original\n process to divide up.\n :param section_counts: either a list of numbers, or a string containing\n comma-separated numbers, indicating the step count\n per section. As a special case, use \"ddimN\" where N\n is a number of steps to use the striding from the\n DDIM paper.\n :return: a set of diffusion steps from the original process to use.\n \"\"\"\n if isinstance(section_counts, str):\n if section_counts.startswith(\"ddim\"):\n desired_count = int(section_counts[len(\"ddim\") :])\n for i in range(1, num_timesteps):\n if len(range(0, num_timesteps, i)) == desired_count:\n return set(range(0, num_timesteps, i))\n raise ValueError(\n f\"cannot create exactly {num_timesteps} steps with an integer stride\"\n )\n section_counts = [int(x) for x in section_counts.split(\",\")]\n size_per = num_timesteps // len(section_counts)\n extra = num_timesteps % len(section_counts)\n start_idx = 0\n all_steps = []\n for i, section_count in enumerate(section_counts):\n size = size_per + (1 if i < extra else 0)\n if size < section_count:\n raise ValueError(\n f\"cannot divide section of {size} steps into {section_count}\"\n )\n if section_count <= 1:\n frac_stride = 1\n else:\n frac_stride = (size - 1) / (section_count - 1)\n cur_idx = 0.0\n taken_steps = []\n for _ in range(section_count):\n taken_steps.append(start_idx + round(cur_idx))\n cur_idx += frac_stride\n all_steps += taken_steps\n start_idx += size\n return set(all_steps)" } ]
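The space_timesteps snippet above documents the striding rule in its docstring (equal sections strided to the requested counts, plus the "ddimN" fixed-stride case). Below is a minimal sketch that exercises exactly that behavior, assuming the repository layout shown in the context (diffusion/respace.py); the concrete numbers are illustrative, not taken from the repo.

# Minimal sketch exercising space_timesteps as documented above.
from diffusion.respace import space_timesteps

# 300 original steps split into three equal sections, strided to 10/15/20 steps.
steps = space_timesteps(300, [10, 15, 20])
print(len(steps))             # expected 45 retained timesteps
print(min(steps), max(steps))

# "ddimN" asks for a fixed integer stride that yields exactly N steps.
ddim_steps = space_timesteps(100, "ddim25")
print(sorted(ddim_steps) == list(range(0, 100, 4)))  # stride 4 gives 25 steps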
import torch from diffusion.control_diffusion import ControlGaussianDiffusion from model.cfg_sampler import wrap_model from model.mdm import MDM from model.ControlMDM import ControlMDM from diffusion import gaussian_diffusion as gd from diffusion.respace import SpacedDiffusion, space_timesteps
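The import list above pulls in wrap_model, whose dispatch on args.guidance_param is spelled out in the model/cfg_sampler.py snippet earlier in this record. The following stand-alone sketch mirrors that dispatch only; DummyCFGWrapper and DummyUncondWrapper are hypothetical stand-ins for the real ClassifierFreeSampleModel and UnconditionedModel classes.

# Hedged sketch of the wrap_model dispatch documented in model/cfg_sampler.py above.
from types import SimpleNamespace

class DummyCFGWrapper:          # stand-in for ClassifierFreeSampleModel
    def __init__(self, model): self.model = model

class DummyUncondWrapper:       # stand-in for UnconditionedModel
    def __init__(self, model): self.model = model

def wrap_model_sketch(model, args):
    if args.guidance_param not in [0., 1.]:   # any other scale -> classifier-free guidance wrapper
        return DummyCFGWrapper(model)
    elif args.guidance_param == 0:            # scale 0 -> unconditioned wrapper
        return DummyUncondWrapper(model)
    return model                              # scale 1 -> use the model unchanged

print(type(wrap_model_sketch("m", SimpleNamespace(guidance_param=2.5))).__name__)  # DummyCFGWrapper
print(type(wrap_model_sketch("m", SimpleNamespace(guidance_param=0.0))).__name__)  # DummyUncondWrapper
print(wrap_model_sketch("m", SimpleNamespace(guidance_param=1.0)))                 # m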
15035
def load_model(args, data, device, ModelClass=MDM): model, diffusion = create_model_and_diffusion(args, data, ModelClass=ModelClass) model_path = args.model_path print(f"Loading checkpoints from [{model_path}]...") state_dict = torch.load(model_path, map_location='cpu') load_model_wo_clip(model, state_dict) model.to(device) model.eval() # disable random masking model = wrap_model(model, args) return model, diffusion def load_model_wo_clip(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) if 't_pos_encoder.pe' in missing_keys: missing_keys.remove('t_pos_encoder.pe') if 't_pos_encoder.pe' in unexpected_keys: unexpected_keys.remove('t_pos_encoder.pe') assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) def load_pretrained_mdm(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') or k.startswith('multi_person.') for k in missing_keys]) def load_pretrained_mdm_to_controlmdm(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) transformer_encoder_weight = {} for key, value in state_dict.items(): if key.startswith('seqTransEncoder'): transformer_encoder_weight[key[16:]] = value unexpected_keys.remove(key) model.seqTransEncoder_mdm.load_state_dict(transformer_encoder_weight, strict=True) model.seqTransEncoder_control.load_state_dict(transformer_encoder_weight, strict=True) assert len(unexpected_keys) == 0 #assert all([k.startswith('clip_model.') for k in missing_keys]) print("The following parameters are trained from scratch.") for k in missing_keys: if not k.startswith('clip_model.') and not k.startswith('seqTransEncoder'): print(k) def load_split_mdm(model, state_dict, cutting_point): new_state_dict = {} orig_trans_prefix = 'seqTransEncoder.' for k, v in state_dict.items(): if k.startswith(orig_trans_prefix): orig_layer = int(k.split('.')[2]) orig_suffix = '.'.join(k.split('.')[3:]) target_split = 'seqTransEncoder_start.' if orig_layer < cutting_point else 'seqTransEncoder_end.' target_layer = orig_layer if orig_layer < cutting_point else orig_layer - cutting_point new_k = target_split + 'layers.' + str(target_layer) + '.' 
+ orig_suffix new_state_dict[new_k] = v else: new_state_dict[k] = v missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') or k.startswith('multi_person.') for k in missing_keys]) def create_model_and_diffusion(args, data, ModelClass=MDM, DiffusionClass=SpacedDiffusion): model = ModelClass(**get_model_args(args, data)) diffusion = create_gaussian_diffusion(args, DiffusionClass) return model, diffusion def get_model_args(args, data): # default args clip_version = 'ViT-B/32' action_emb = 'tensor' cond_mode = 'text' if args.dataset in ['humanml', 'kit','babel', 'pw3d'] else 'action' if hasattr(data.dataset, 'num_actions'): num_actions = data.dataset.num_actions else: num_actions = 1 # SMPL defaults data_rep = 'rot6d' njoints = 25 nfeats = 6 if args.dataset in ['humanml', 'pw3d']: data_rep = 'hml_vec' njoints = 263 nfeats = 1 elif args.dataset == 'babel': data_rep = 'rot6d' njoints = 135 nfeats = 1 elif args.dataset == 'kit': data_rep = 'hml_vec' njoints = 251 nfeats = 1 else: raise TypeError(f'dataset {args.dataset} is not currently supported') return {'modeltype': '', 'njoints': njoints, 'nfeats': nfeats, 'num_actions': num_actions, 'translation': True, 'pose_rep': 'rot6d', 'glob': True, 'glob_rot': True, 'latent_dim': args.latent_dim, 'ff_size': 1024, 'num_layers': args.layers, 'num_heads': 4, 'dropout': 0.1, 'activation': "gelu", 'data_rep': data_rep, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'action_emb': action_emb, 'arch': args.arch, 'emb_trans_dec': args.emb_trans_dec, 'clip_version': clip_version, 'dataset': args.dataset, 'diffusion-steps': args.diffusion_steps, 'batch_size': args.batch_size, 'use_tta': args.use_tta, 'trans_emb': args.trans_emb, 'concat_trans_emb': args.concat_trans_emb, 'args': args} def create_gaussian_diffusion(args, DiffusionClass=SpacedDiffusion): # default params predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal! steps = args.diffusion_steps scale_beta = 1. # no scaling timestep_respacing = '' # can be used for ddim sampling, we don't use it. learn_sigma = False rescale_timesteps = False print(f"number of diffusion-steps: {steps}")
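load_split_mdm in the cropped code above remaps pretrained seqTransEncoder weights into two sub-encoders: layers below cutting_point go to seqTransEncoder_start, the remaining layers are re-indexed from zero and go to seqTransEncoder_end. The sketch below replicates just that key-renaming rule on a toy state dict; the key names are illustrative only, not taken from a real checkpoint.

# Sketch of the key remapping performed by load_split_mdm above.
def split_transformer_keys(state_dict, cutting_point):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.startswith('seqTransEncoder.'):
            parts = k.split('.')
            layer, suffix = int(parts[2]), '.'.join(parts[3:])
            split = 'seqTransEncoder_start.' if layer < cutting_point else 'seqTransEncoder_end.'
            layer = layer if layer < cutting_point else layer - cutting_point
            new_state_dict[split + 'layers.' + str(layer) + '.' + suffix] = v
        else:
            new_state_dict[k] = v
    return new_state_dict

toy = {'seqTransEncoder.layers.0.linear1.weight': 0,
       'seqTransEncoder.layers.5.linear1.weight': 1,
       'embed_timestep.weight': 2}
print(split_transformer_keys(toy, cutting_point=4))
# layer 0 -> seqTransEncoder_start.layers.0..., layer 5 -> seqTransEncoder_end.layers.1...,
# non-encoder keys pass through unchanged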
def load_controlmdm_and_diffusion(args, data, device, ModelClass=ControlMDM, DiffusionClass=ControlGaussianDiffusion): model, diffusion = create_model_and_diffusion(args, data, ModelClass=ControlMDM, DiffusionClass=DiffusionClass) model_path = args.model_path print(f"Loading checkpoints from [{model_path}]...") state_dict = torch.load(model_path, map_location='cpu') load_model_wo_clip(model, state_dict) model.mean = data.dataset.t2m_dataset.mean model.std = data.dataset.t2m_dataset.std model.to(device) model.eval() # disable random masking model = wrap_model(model, args) return model, diffusion def load_model(args, data, device, ModelClass=MDM): model, diffusion = create_model_and_diffusion(args, data, ModelClass=ModelClass) model_path = args.model_path print(f"Loading checkpoints from [{model_path}]...") state_dict = torch.load(model_path, map_location='cpu') load_model_wo_clip(model, state_dict) model.to(device) model.eval() # disable random masking model = wrap_model(model, args) return model, diffusion def load_model_wo_clip(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) if 't_pos_encoder.pe' in missing_keys: missing_keys.remove('t_pos_encoder.pe') if 't_pos_encoder.pe' in unexpected_keys: unexpected_keys.remove('t_pos_encoder.pe') assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') for k in missing_keys]) def load_pretrained_mdm(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') or k.startswith('multi_person.') for k in missing_keys]) def load_pretrained_mdm_to_controlmdm(model, state_dict): missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) transformer_encoder_weight = {} for key, value in state_dict.items(): if key.startswith('seqTransEncoder'): transformer_encoder_weight[key[16:]] = value unexpected_keys.remove(key) model.seqTransEncoder_mdm.load_state_dict(transformer_encoder_weight, strict=True) model.seqTransEncoder_control.load_state_dict(transformer_encoder_weight, strict=True) assert len(unexpected_keys) == 0 #assert all([k.startswith('clip_model.') for k in missing_keys]) print("The following parameters are trained from scratch.") for k in missing_keys: if not k.startswith('clip_model.') and not k.startswith('seqTransEncoder'): print(k) def load_split_mdm(model, state_dict, cutting_point): new_state_dict = {} orig_trans_prefix = 'seqTransEncoder.' for k, v in state_dict.items(): if k.startswith(orig_trans_prefix): orig_layer = int(k.split('.')[2]) orig_suffix = '.'.join(k.split('.')[3:]) target_split = 'seqTransEncoder_start.' if orig_layer < cutting_point else 'seqTransEncoder_end.' target_layer = orig_layer if orig_layer < cutting_point else orig_layer - cutting_point new_k = target_split + 'layers.' + str(target_layer) + '.' 
+ orig_suffix new_state_dict[new_k] = v else: new_state_dict[k] = v missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False) assert len(unexpected_keys) == 0 assert all([k.startswith('clip_model.') or k.startswith('multi_person.') for k in missing_keys]) def create_model_and_diffusion(args, data, ModelClass=MDM, DiffusionClass=SpacedDiffusion): model = ModelClass(**get_model_args(args, data)) diffusion = create_gaussian_diffusion(args, DiffusionClass) return model, diffusion def get_model_args(args, data): # default args clip_version = 'ViT-B/32' action_emb = 'tensor' cond_mode = 'text' if args.dataset in ['humanml', 'kit','babel', 'pw3d'] else 'action' if hasattr(data.dataset, 'num_actions'): num_actions = data.dataset.num_actions else: num_actions = 1 # SMPL defaults data_rep = 'rot6d' njoints = 25 nfeats = 6 if args.dataset in ['humanml', 'pw3d']: data_rep = 'hml_vec' njoints = 263 nfeats = 1 elif args.dataset == 'babel': data_rep = 'rot6d' njoints = 135 nfeats = 1 elif args.dataset == 'kit': data_rep = 'hml_vec' njoints = 251 nfeats = 1 else: raise TypeError(f'dataset {args.dataset} is not currently supported') return {'modeltype': '', 'njoints': njoints, 'nfeats': nfeats, 'num_actions': num_actions, 'translation': True, 'pose_rep': 'rot6d', 'glob': True, 'glob_rot': True, 'latent_dim': args.latent_dim, 'ff_size': 1024, 'num_layers': args.layers, 'num_heads': 4, 'dropout': 0.1, 'activation': "gelu", 'data_rep': data_rep, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'action_emb': action_emb, 'arch': args.arch, 'emb_trans_dec': args.emb_trans_dec, 'clip_version': clip_version, 'dataset': args.dataset, 'diffusion-steps': args.diffusion_steps, 'batch_size': args.batch_size, 'use_tta': args.use_tta, 'trans_emb': args.trans_emb, 'concat_trans_emb': args.concat_trans_emb, 'args': args} def create_gaussian_diffusion(args, DiffusionClass=SpacedDiffusion): # default params predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal! steps = args.diffusion_steps scale_beta = 1. # no scaling timestep_respacing = '' # can be used for ddim sampling, we don't use it. learn_sigma = False rescale_timesteps = False print(f"number of diffusion-steps: {steps}")
betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
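The next_line above continues create_gaussian_diffusion by building the beta schedule. Below is a hedged sketch of how the pieces shown in this record typically compose (betas, then space_timesteps, then SpacedDiffusion); it is not claimed to be the repository's actual continuation. The 'cosine' schedule name and the enum choices implied by predict_xstart=True / learn_sigma=False are assumptions carried over from the original guided-diffusion conventions.

# Hedged sketch, not the repo's exact code.
from diffusion import gaussian_diffusion as gd
from diffusion.respace import SpacedDiffusion, space_timesteps

def build_spaced_diffusion_sketch(steps=1000, timestep_respacing='', scale_beta=1.):
    # the next_line above: build the named beta schedule ('cosine' is an assumption)
    betas = gd.get_named_beta_schedule('cosine', steps, scale_beta)
    if not timestep_respacing:
        timestep_respacing = [steps]  # keep every step when no respacing is requested
    return SpacedDiffusion(
        use_timesteps=space_timesteps(steps, timestep_respacing),
        betas=betas,
        # predict_xstart=True / learn_sigma=False suggest these enum choices;
        # they are an assumption, not verified against the repository.
        model_mean_type=gd.ModelMeanType.START_X,
        model_var_type=gd.ModelVarType.FIXED_SMALL,
        loss_type=gd.LossType.MSE,
        rescale_timesteps=False,
    )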
3
2023-11-27 05:28:02+00:00
24k
camenduru/magicanimate-hf
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetModel", "path": "magicanimate/models/controlnet.py", "snippet": "class ControlNetModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n ):\n super().__init__()\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = 
zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlock2DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n\n @classmethod\n def from_unet(\n cls,\n unet: UNet2DConditionModel,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n load_weights_from_unet: bool = True,\n ):\n r\"\"\"\n Instantiate Controlnet class from UNet2DConditionModel.\n\n Parameters:\n unet (`UNet2DConditionModel`):\n UNet model which weights are copied to the ControlNet. Note that all configuration options are also\n copied where applicable.\n \"\"\"\n controlnet = cls(\n in_channels=unet.config.in_channels,\n flip_sin_to_cos=unet.config.flip_sin_to_cos,\n freq_shift=unet.config.freq_shift,\n down_block_types=unet.config.down_block_types,\n only_cross_attention=unet.config.only_cross_attention,\n block_out_channels=unet.config.block_out_channels,\n layers_per_block=unet.config.layers_per_block,\n downsample_padding=unet.config.downsample_padding,\n mid_block_scale_factor=unet.config.mid_block_scale_factor,\n act_fn=unet.config.act_fn,\n norm_num_groups=unet.config.norm_num_groups,\n norm_eps=unet.config.norm_eps,\n cross_attention_dim=unet.config.cross_attention_dim,\n attention_head_dim=unet.config.attention_head_dim,\n use_linear_projection=unet.config.use_linear_projection,\n class_embed_type=unet.config.class_embed_type,\n num_class_embeds=unet.config.num_class_embeds,\n upcast_attention=unet.config.upcast_attention,\n resnet_time_scale_shift=unet.config.resnet_time_scale_shift,\n projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,\n controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,\n conditioning_embedding_out_channels=conditioning_embedding_out_channels,\n )\n\n if load_weights_from_unet:\n controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())\n controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())\n controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())\n\n if controlnet.class_embedding:\n controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())\n\n controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())\n controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())\n\n return controlnet\n\n # @property\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n # def attn_processors(self) -> Dict[str, AttentionProcessor]:\n # r\"\"\"\n # Returns:\n # `dict` of attention processors: A dictionary containing all attention processors used in the model with\n # indexed by its 
weight name.\n # \"\"\"\n # # set recursively\n # processors = {}\n\n # def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n # if hasattr(module, \"set_processor\"):\n # processors[f\"{name}.processor\"] = module.processor\n\n # for sub_name, child in module.named_children():\n # fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n # return processors\n\n # for name, module in self.named_children():\n # fn_recursive_add_processors(name, module, processors)\n\n # return processors\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n # def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n # r\"\"\"\n # Parameters:\n # `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):\n # The instantiated processor class or a dictionary of processor classes that will be set as the processor\n # of **all** `Attention` layers.\n # In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:\n\n # \"\"\"\n # count = len(self.attn_processors.keys())\n\n # if isinstance(processor, dict) and len(processor) != count:\n # raise ValueError(\n # f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n # f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n # )\n\n # def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n # if hasattr(module, \"set_processor\"):\n # if not isinstance(processor, dict):\n # module.set_processor(processor)\n # else:\n # module.set_processor(processor.pop(f\"{name}.processor\"))\n\n # for sub_name, child in module.named_children():\n # fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n # for name, module in self.named_children():\n # fn_recursive_attn_processor(name, module, processor)\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n # def set_default_attn_processor(self):\n # \"\"\"\n # Disables custom attention processors and sets the default attention implementation.\n # \"\"\"\n # self.set_attn_processor(AttnProcessor())\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n conditioning_scale: float = 1.0,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n\n sample += controlnet_cond\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n\n # 5. Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n # 6. scaling\n down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]\n mid_block_res_sample *= conditioning_scale\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
19344
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size) reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition=controlnet_condition, device=device, dtype=controlnet.dtype, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_uncond_images, controlnet_cond_images = control.chunk(2) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) controlnet_res_samples_cache_dict = {i:None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda() elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) 
_mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size) reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size) reference_control_reader = ReferenceAttentionControl(self.unet, do_classifier_free_guidance=True, mode='read', batch_size=context_batch_size) is_dist_initialized = kwargs.get("dist", False) rank = kwargs.get("rank", 0) world_size = kwargs.get("world_size", 1) # Prepare video assert num_videos_per_prompt == 1 # FIXME: verify if num_videos_per_prompt > 1 works assert batch_size == 1 # FIXME: verify if batch_size > 1 works control = self.prepare_condition( condition=controlnet_condition, device=device, dtype=controlnet.dtype, num_videos_per_prompt=num_videos_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, ) controlnet_uncond_images, controlnet_cond_images = control.chunk(2) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables if init_latents is not None: latents = rearrange(init_latents, "(b f) c h w -> b c f h w", f=video_length) else: num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype # Prepare extra step kwargs. extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # Prepare text embeddings for controlnet controlnet_text_embeddings = text_embeddings.repeat_interleave(video_length, 0) _, controlnet_text_embeddings_c = controlnet_text_embeddings.chunk(2) controlnet_res_samples_cache_dict = {i:None for i in range(video_length)} # For img2img setting if num_actual_inference_steps is None: num_actual_inference_steps = num_inference_steps if isinstance(source_image, str): ref_image_latents = self.images2latents(np.array(Image.open(source_image).resize((width, height)))[None, :], latents_dtype).cuda() elif isinstance(source_image, np.ndarray): ref_image_latents = self.images2latents(source_image[None, :], latents_dtype).cuda()
context_scheduler = get_context_scheduler(context_schedule)
3
2023-12-04 20:47:34+00:00
24k
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n def __init__(self, headers=None, **kwargs):\n super(HTTPHeaderDict, self).__init__()\n self._container = OrderedDict()\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key, val):\n self._container[key.lower()] = [key, val]\n return self._container[key.lower()]\n\n def __getitem__(self, key):\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key):\n del self._container[key.lower()]\n\n def __contains__(self, key):\n return key.lower() in self._container\n\n def __eq__(self, other):\n if not isinstance(other, Mapping) and not hasattr(other, \"keys\"):\n return False\n if not isinstance(other, type(self)):\n other = type(self)(other)\n return dict((k.lower(), v) for k, v in self.itermerged()) == dict(\n (k.lower(), v) for k, v in other.itermerged()\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n if six.PY2: # Python 2\n iterkeys = MutableMapping.iterkeys\n itervalues = MutableMapping.itervalues\n\n __marker = object()\n\n def __len__(self):\n return len(self._container)\n\n def __iter__(self):\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def pop(self, key, default=__marker):\n \"\"\"D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n If key is not found, d is returned if given, otherwise KeyError is raised.\n \"\"\"\n # Using the MutableMapping function directly fails due to the private marker.\n # Using ordinary dict.pop would expose the internal structures.\n # So let's reinvent the wheel.\n try:\n value = self[key]\n except KeyError:\n if default is self.__marker:\n raise\n return default\n else:\n del self[key]\n return value\n\n def discard(self, key):\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key, val):\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n \"\"\"\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = 
self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n vals.append(val)\n\n def extend(self, *args, **kwargs):\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n \"extend() takes at most 1 positional \"\n \"arguments ({0} given)\".format(len(args))\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, Mapping):\n for key in other:\n self.add(key, other[key])\n elif hasattr(other, \"keys\"):\n for key in other.keys():\n self.add(key, other[key])\n else:\n for key, value in other:\n self.add(key, value)\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n def getlist(self, key, default=__marker):\n \"\"\"Returns a list of all the values for the named field. Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is self.__marker:\n return []\n return default\n else:\n return vals[1:]\n\n def _prepare_for_method_change(self):\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self):\n return \"%s(%s)\" % (type(self).__name__, dict(self.itermerged()))\n\n def _copy_from(self, other):\n for key in other:\n val = other.getlist(key)\n if isinstance(val, list):\n # Don't need to convert tuples\n val = list(val)\n self._container[key.lower()] = [key] + val\n\n def copy(self):\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self):\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self):\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self):\n return list(self.iteritems())\n\n @classmethod\n def from_httplib(cls, message): # Python 2\n \"\"\"Read headers from a Python 2 httplib message object.\"\"\"\n # python2.7 does not expose a proper API for exporting multiheaders\n # efficiently. This function re-reads raw lines from the message\n # object and extracts the multiheaders properly.\n obs_fold_continued_leaders = (\" \", \"\\t\")\n headers = []\n\n for line in message.headers:\n if line.startswith(obs_fold_continued_leaders):\n if not headers:\n # We received a header line that starts with OWS as described\n # in RFC-7230 S3.2.4. 
This indicates a multiline header, but\n # there exists no previous header to which we can attach it.\n raise InvalidHeader(\n \"Header continuation with no previous header: %s\" % line\n )\n else:\n key, value = headers[-1]\n headers[-1] = (key, value + \" \" + line.strip())\n continue\n\n key, value = line.split(\":\", 1)\n headers.append((key, value.strip()))\n\n return cls(headers)" }, { "identifier": "RecentlyUsedContainer", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class RecentlyUsedContainer(MutableMapping):\n \"\"\"\n Provides a thread-safe dict-like container which maintains up to\n ``maxsize`` keys while throwing away the least-recently-used keys beyond\n ``maxsize``.\n\n :param maxsize:\n Maximum number of recent elements to retain.\n\n :param dispose_func:\n Every time an item is evicted from the container,\n ``dispose_func(value)`` is called. Callback which will get called\n \"\"\"\n\n ContainerCls = OrderedDict\n\n def __init__(self, maxsize=10, dispose_func=None):\n self._maxsize = maxsize\n self.dispose_func = dispose_func\n\n self._container = self.ContainerCls()\n self.lock = RLock()\n\n def __getitem__(self, key):\n # Re-insert the item, moving it to the end of the eviction line.\n with self.lock:\n item = self._container.pop(key)\n self._container[key] = item\n return item\n\n def __setitem__(self, key, value):\n evicted_value = _Null\n with self.lock:\n # Possibly evict the existing value of 'key'\n evicted_value = self._container.get(key, _Null)\n self._container[key] = value\n\n # If we didn't evict an existing value, we might have to evict the\n # least recently used item from the beginning of the container.\n if len(self._container) > self._maxsize:\n _key, evicted_value = self._container.popitem(last=False)\n\n if self.dispose_func and evicted_value is not _Null:\n self.dispose_func(evicted_value)\n\n def __delitem__(self, key):\n with self.lock:\n value = self._container.pop(key)\n\n if self.dispose_func:\n self.dispose_func(value)\n\n def __len__(self):\n with self.lock:\n return len(self._container)\n\n def __iter__(self):\n raise NotImplementedError(\n \"Iteration over this class is unlikely to be threadsafe.\"\n )\n\n def clear(self):\n with self.lock:\n # Copy pointers to all values, then wipe the mapping\n values = list(itervalues(self._container))\n self._container.clear()\n\n if self.dispose_func:\n for value in values:\n self.dispose_func(value)\n\n def keys(self):\n with self.lock:\n return list(iterkeys(self._container))" }, { "identifier": "HTTPConnectionPool", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/connectionpool.py", "snippet": "class ConnectionPool(object):\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\nclass HTTPSConnectionPool(HTTPConnectionPool):\n def __init__(self, host, port=None):\n def __str__(self):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def close(self):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n _proxy_config=None,\n **conn_kw\n ):\n def _new_conn(self):\n def _get_conn(self, timeout=None):\n def _put_conn(self, conn):\n def _validate_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _get_timeout(self, timeout):\n def _raise_timeout(self, err, url, timeout_value):\n def _make_request(\n self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw\n 
):\n def _absolute_url(self, path):\n def close(self):\n def is_same_host(self, url):\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=None,\n redirect=True,\n assert_same_host=True,\n timeout=_Default,\n pool_timeout=None,\n release_conn=None,\n chunked=False,\n body_pos=None,\n **response_kw\n ):\n def _is_ssl_error_message_from_http_proxy(ssl_error):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n key_file=None,\n cert_file=None,\n cert_reqs=None,\n key_password=None,\n ca_certs=None,\n ssl_version=None,\n assert_hostname=None,\n assert_fingerprint=None,\n ca_cert_dir=None,\n **conn_kw\n ):\n def _prepare_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _new_conn(self):\n def _validate_conn(self, conn):\ndef connection_from_url(url, **kw):\ndef _normalize_host(host, scheme):\ndef _close_pool_connections(pool):" }, { "identifier": "LocationValueError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"\n\n pass" }, { "identifier": "MaxRetryError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param string url: The requested Url\n :param exceptions.Exception reason: The underlying error\n\n \"\"\"\n\n def __init__(self, pool, url, reason=None):\n self.reason = reason\n\n message = \"Max retries exceeded with url: %s (Caused by %r)\" % (url, reason)\n\n RequestError.__init__(self, pool, url, message)" }, { "identifier": "ProxySchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):\n \"\"\"ProxyManager does not support the supplied scheme\"\"\"\n\n # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.\n\n def __init__(self, scheme):\n # 'localhost' is here because our URL parser parses\n # localhost:8080 -> scheme=localhost, remove if we fix this.\n if scheme == \"localhost\":\n scheme = None\n if scheme is None:\n message = \"Proxy URL had no scheme, should start with http:// or https://\"\n else:\n message = (\n \"Proxy URL had unsupported scheme %s, should use http:// or https://\"\n % scheme\n )\n super(ProxySchemeUnknown, self).__init__(message)" }, { "identifier": "ProxySchemeUnsupported", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnsupported(ValueError):\n \"\"\"Fetching HTTPS resources through HTTPS proxies is unsupported\"\"\"\n\n pass" }, { "identifier": "URLSchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class URLSchemeUnknown(LocationValueError):\n \"\"\"Raised when a URL input has an unsupported scheme.\"\"\"\n\n def __init__(self, scheme):\n message = \"Not supported URL scheme %s\" % scheme\n super(URLSchemeUnknown, self).__init__(message)\n\n self.scheme = scheme" }, { "identifier": "six", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/packages/six.py", "snippet": "PY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nPY34 = sys.version_info[0:2] 
>= (3, 4)\n MAXSIZE = sys.maxsize\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 63) - 1)\n class X(object):\nclass _LazyDescr(object):\nclass MovedModule(_LazyDescr):\nclass _LazyModule(types.ModuleType):\nclass MovedAttribute(_LazyDescr):\nclass _SixMetaPathImporter(object):\nclass _MovedItems(_LazyModule):\nclass Module_six_moves_urllib_parse(_LazyModule):\nclass Module_six_moves_urllib_error(_LazyModule):\nclass Module_six_moves_urllib_request(_LazyModule):\nclass Module_six_moves_urllib_response(_LazyModule):\nclass Module_six_moves_urllib_robotparser(_LazyModule):\nclass Module_six_moves_urllib(types.ModuleType):\n class Iterator(object):\n class metaclass(type):\n def __len__(self):\ndef _add_doc(func, doc):\ndef _import_module(name):\n def __init__(self, name):\n def __get__(self, obj, tp):\n def __init__(self, name, old, new=None):\n def _resolve(self):\n def __getattr__(self, attr):\n def __init__(self, name):\n def __dir__(self):\n def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):\n def _resolve(self):\n def __init__(self, six_module_name):\n def _add_module(self, mod, *fullnames):\n def _get_module(self, fullname):\n def find_module(self, fullname, path=None):\n def find_spec(self, fullname, path, target=None):\n def __get_module(self, fullname):\n def load_module(self, fullname):\n def is_package(self, fullname):\n def get_code(self, fullname):\n def create_module(self, spec):\n def exec_module(self, module):\n def __dir__(self):\ndef add_move(move):\ndef remove_move(name):\n def advance_iterator(it):\n def callable(obj):\n def get_unbound_function(unbound):\n def create_unbound_method(func, cls):\n def get_unbound_function(unbound):\n def create_bound_method(func, obj):\n def create_unbound_method(func, cls):\n def next(self):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def b(s):\n def u(s):\n def b(s):\n def u(s):\n def byte2int(bs):\n def indexbytes(buf, i):\ndef assertCountEqual(self, *args, **kwargs):\ndef assertRaisesRegex(self, *args, **kwargs):\ndef assertRegex(self, *args, **kwargs):\ndef assertNotRegex(self, *args, **kwargs):\n def reraise(tp, value, tb=None):\n def exec_(_code_, _globs_=None, _locs_=None):\n def raise_from(value, from_value):\n def print_(*args, **kwargs):\n def write(data):\n def print_(*args, **kwargs):\n def _update_wrapper(\n wrapper,\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\n def wraps(\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\ndef with_metaclass(meta, *bases):\n def __new__(cls, name, this_bases, d):\n def __prepare__(cls, name, this_bases):\ndef add_metaclass(metaclass):\n def wrapper(cls):\ndef ensure_binary(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_str(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_text(s, encoding=\"utf-8\", errors=\"strict\"):\ndef python_2_unicode_compatible(klass):" }, { "identifier": "RequestMethods", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/request.py", "snippet": "class RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of 
request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **kw\n ): # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n urlopen_kw[\"request_url\"] = url\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method,\n url,\n fields=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **urlopen_kw\n ):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. 
For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": {}}\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields),\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"] = {\"Content-Type\": content_type}\n\n extra_kw[\"headers\"].update(headers)\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "connection_requires_http_tunnel", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url=None, proxy_config=None, destination_scheme=None\n):\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. (i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "Retry", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/retry.py", "snippet": "class Retry(object):\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool::\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request('GET', 'http://example.com/')\n\n Or per-request (which overrides the default for the pool)::\n\n response = http.request('GET', 'http://example.com/', retries=Retry(10))\n\n Retries can be disabled by passing ``False``::\n\n response = http.request('GET', 'http://example.com/', retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. 
Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param iterable allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``False`` value to retry on any verb.\n\n .. warning::\n\n Previously this parameter was named ``method_whitelist``, that\n usage is deprecated in v1.26.0 and will be removed in v2.0.\n\n :param iterable status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of total retries} - 1))\n\n seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep\n for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer\n than :attr:`Retry.DEFAULT_BACKOFF_MAX`.\n\n By default, backoff is disabled (set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. 
The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param iterable remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n def __init__(\n self,\n total=10,\n connect=None,\n read=None,\n redirect=None,\n status=None,\n other=None,\n allowed_methods=_Default,\n status_forcelist=None,\n backoff_factor=0,\n raise_on_redirect=True,\n raise_on_status=True,\n history=None,\n respect_retry_after_header=True,\n remove_headers_on_redirect=_Default,\n # TODO: Deprecated, remove in v2.0\n method_whitelist=_Default,\n ):\n\n if method_whitelist is not _Default:\n if allowed_methods is not _Default:\n raise ValueError(\n \"Using both 'allowed_methods' and \"\n \"'method_whitelist' together is not allowed. \"\n \"Instead only use 'allowed_methods'\"\n )\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n allowed_methods = method_whitelist\n if allowed_methods is _Default:\n allowed_methods = self.DEFAULT_ALLOWED_METHODS\n if remove_headers_on_redirect is _Default:\n remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT\n\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or tuple()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n [h.lower() for h in remove_headers_on_redirect]\n )\n\n def new(self, **kw):\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n )\n\n # TODO: If already given in **kw we use what's given to us\n # If not given we need to figure out what to pass. We decide\n # based on whether our class has the 'method_whitelist' property\n # and if so we pass the deprecated 'method_whitelist' otherwise\n # we use 'allowed_methods'. 
Remove in v2.0\n if \"method_whitelist\" not in kw and \"allowed_methods\" not in kw:\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n params[\"method_whitelist\"] = self.allowed_methods\n else:\n params[\"allowed_methods\"] = self.allowed_methods\n\n params.update(kw)\n return type(self)(**params)\n\n @classmethod\n def from_int(cls, retries, redirect=True, default=None):\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self):\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n return min(self.DEFAULT_BACKOFF_MAX, backoff_value)\n\n def parse_retry_after(self, retry_after):\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(\"Invalid Retry-After header: %s\" % retry_after)\n if retry_date_tuple[9] is None: # Python 2\n # Assume UTC if no timezone was specified\n # On Python2.7, parsedate_tz returns None for a timezone offset\n # instead of 0 if no timezone is given, where mktime_tz treats\n # a None timezone offset as local time.\n retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n if seconds < 0:\n seconds = 0\n\n return seconds\n\n def get_retry_after(self, response):\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response=None):\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self):\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response=None):\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. 
By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err):\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err):\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method):\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n # TODO: For now favor if the Retry implementation sets its own method_whitelist\n # property outside of our constructor to avoid breaking custom implementations.\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n allowed_methods = self.method_whitelist\n else:\n allowed_methods = self.allowed_methods\n\n if allowed_methods and method.upper() not in allowed_methods:\n return False\n return True\n\n def is_retry(self, method, status_code, has_retry_after=False):\n \"\"\"Is this method/status code retryable? (Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return (\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self):\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n retry_counts = list(filter(None, retry_counts))\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method=None,\n url=None,\n response=None,\n error=None,\n _pool=None,\n _stacktrace=None,\n ):\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.HTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise six.reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise six.reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n 
elif error and self._is_read_error(error):\n # Read retry?\n if read is False or not self._is_method_retryable(method):\n raise six.reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n redirect_location = response.get_redirect_location()\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, new_retry)\n\n return new_retry\n\n def __repr__(self):\n return (\n \"{cls.__name__}(total={self.total}, connect={self.connect}, \"\n \"read={self.read}, redirect={self.redirect}, status={self.status})\"\n ).format(cls=type(self), self=self)\n\n def __getattr__(self, item):\n if item == \"method_whitelist\":\n # TODO: Remove this deprecated alias in v2.0\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n return self.allowed_methods\n try:\n return getattr(super(Retry, self), item)\n except AttributeError:\n return getattr(Retry, item)" }, { "identifier": "parse_url", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/url.py", "snippet": "def parse_url(url):\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. 
Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urlparse`.\n\n Example::\n\n >>> parse_url('http://google.com/mail/')\n Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n >>> parse_url('google.com:80')\n Url(scheme=None, host='google.com', port=80, path=None, ...)\n >>> parse_url('/foo?bar')\n Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not SCHEME_RE.search(url):\n url = \"//\" + url\n\n try:\n scheme, authority, path, query, fragment = URI_RE.match(url).groups()\n normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups()\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port = int(port)\n if not (0 <= port <= 65535):\n raise LocationParseError(url)\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)\n\n except (ValueError, AttributeError):\n return six.raise_from(LocationParseError(source_url), None)\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n # Ensure that each part of the URL is a `str` for\n # backwards compatibility.\n if isinstance(url, six.text_type):\n ensure_func = six.ensure_text\n else:\n ensure_func = six.ensure_str\n\n def ensure_type(x):\n return x if x is None else ensure_func(x)\n\n return Url(\n scheme=ensure_type(scheme),\n auth=ensure_type(auth),\n host=ensure_type(host),\n port=port,\n path=ensure_type(path),\n query=ensure_type(query),\n fragment=ensure_type(fragment),\n )" } ]
import collections import functools import logging from ._collections import HTTPHeaderDict, RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url
14808
If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. """ if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. 
""" if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https:
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. 
""" if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https:
raise ProxySchemeUnsupported(
6
2023-11-27 07:01:39+00:00
24k
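The urllib3 record above centers on how PoolManager keys every connection pool by scheme, host, and port and caches the pools in an LRU container. A minimal usage sketch of that behavior, written only against the methods visible in the snippet itself (the hostnames below are placeholders, not taken from the record):

from urllib3 import PoolManager

# At most two pools are cached; the least recently used one is evicted first.
manager = PoolManager(num_pools=2)

# Pools are created lazily and keyed by (scheme, host, port), so repeated
# requests to the same origin reuse a single HTTPSConnectionPool.
r1 = manager.request("GET", "https://example.com/a")
r2 = manager.request("GET", "https://example.com/b")
r3 = manager.request("GET", "http://example.org/")
print(len(manager.pools))   # 2 -- one pool per distinct (scheme, host, port)

# connection_from_host derives the port from the scheme when it is omitted.
pool = manager.connection_from_host("example.com", scheme="https")
print(pool.port)            # 443

manager.clear()  # closes idle pools; in-flight connections finish but are not reused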
NobiDeveloper/Nobita-Filter-Bot
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, { "identifier": "SUPPORT_CHAT", "path": "info.py", "snippet": "SUPPORT_CHAT = environ.get('SUPPORT_CHAT', 'NobiDeveloperSupport')" }, { "identifier": "MELCOW_NEW_USERS", "path": "info.py", "snippet": "MELCOW_NEW_USERS = is_enabled((environ.get('MELCOW_NEW_USERS', \"True\")), True)" }, { "identifier": "MELCOW_VID", "path": "info.py", "snippet": "MELCOW_VID = environ.get(\"MELCOW_VID\", \"https://telegra.ph/file/61ef9818986cef9554017.jpg\")" }, { "identifier": "CHNL_LNK", "path": "info.py", "snippet": "CHNL_LNK = environ.get('CHNL_LNK', 'https://telegram.me/NobiDeveloper')" }, { "identifier": "GRP_LNK", "path": "info.py", "snippet": "GRP_LNK = environ.get('GRP_LNK', 'https://telegram.me/NobiDeveloperSupport')" }, { "identifier": "db", "path": "database/users_chats_db.py", "snippet": "class Database:\n def __init__(self, uri, database_name):\n def new_user(self, id, name):\n def new_group(self, id, title):\n async def add_user(self, id, name):\n async def is_user_exist(self, id):\n async def total_users_count(self):\n async def remove_ban(self, id):\n async def ban_user(self, user_id, ban_reason=\"No Reason\"):\n async def get_ban_status(self, id):\n async def get_all_users(self):\n async def delete_user(self, user_id):\n async def get_banned(self):\n async def add_chat(self, chat, title):\n async def get_chat(self, chat):\n async def re_enable_chat(self, id):\n async def update_settings(self, id, settings):\n async def get_settings(self, id):\n async def disable_chat(self, chat, reason=\"No Reason\"):\n async def total_chat_count(self):\n async def get_all_chats(self):\n async def get_db_size(self):" }, { "identifier": "Media", "path": "database/ia_filterdb.py", "snippet": "class Media(Document):\n file_id = fields.StrField(attribute='_id')\n file_ref = fields.StrField(allow_none=True)\n file_name = fields.StrField(required=True)\n file_size = fields.IntField(required=True)\n file_type = fields.StrField(allow_none=True)\n mime_type = fields.StrField(allow_none=True)\n caption = fields.StrField(allow_none=True)\n\n class Meta:\n indexes = ('$file_name', )\n collection_name = COLLECTION_NAME" }, { "identifier": "get_size", "path": "utils.py", "snippet": "def get_size(size):\n \"\"\"Get size in readable format\"\"\"\n\n units = [\"Bytes\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]\n size = float(size)\n i = 0\n while size >= 1024.0 and i < len(units):\n i += 1\n size /= 1024.0\n return \"%.2f %s\" % (size, units[i])" }, { "identifier": "temp", "path": "utils.py", "snippet": "class temp(object):\n BANNED_USERS = []\n BANNED_CHATS = []\n ME = None\n CURRENT=int(os.environ.get(\"SKIP\", 2))\n CANCEL = False\n MELCOW = {}\n U_NAME = None\n B_NAME = None\n GETALL = {}\n SHORT = {}\n SETTINGS = {}" }, { "identifier": "get_settings", "path": "utils.py", "snippet": "async def get_settings(group_id):\n settings = temp.SETTINGS.get(group_id)\n if not settings:\n settings = await db.get_settings(group_id)\n temp.SETTINGS[group_id] = settings\n return settings" }, { "identifier": "script", "path": "Script.py", "snippet": "class script(object):\n START_TXT = \"\"\"\n<b>{},\n\nɪ ᴄᴀɴ ᴘʀᴏᴠɪᴅᴇ ᴍᴏᴠɪᴇs ᴀɴᴅ sᴇʀɪᴇs,\nᴊᴜsᴛ ᴀᴅᴅ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴀɴᴅ ᴇɴᴊᴏʏ 😍\n\n💞 ᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ : <a href='https://telegram.me/MovieVillaYT'>ᴍᴏᴠɪᴇ 
ᴠɪʟʟᴀ</a></b>\n\"\"\"\n\n HELP_TXT = \"\"\"\n<b>{},\n\n/g_info - ᴛᴏ ᴄʜᴇᴄᴋ ʏᴏᴜʀ ᴠᴀʟᴜᴇꜱ\n/set_tutorial - ᴛᴏ ꜱᴇᴛ ᴄᴜꜱᴛᴏᴍ ᴛᴜᴛᴏʀɪᴀʟ\n/set_shortlink - ᴛᴏ ꜱᴇᴛ ᴄᴜꜱᴛᴏᴍ ꜱʜᴏʀᴛᴇɴᴇʀ\n/rem_tutorial - ᴛᴏ ʀᴇᴍᴏᴠᴇ ᴛᴜᴛᴏʀɪᴀʟ ʟɪɴᴋ\n</b>\"\"\"\n\n ABOUT_TXT = \"\"\"<b>➣ ᴍʏ ɴᴀᴍᴇ ⋟</b> {}\n<b>➢ ᴄʀᴇᴀᴛᴏʀ ⋟</b> <a href=https://youtube.com/@NobiDeveloper>𝘔𝘖𝘝𝘐𝘌 𝘝𝘐𝘓𝘓𝘈</a>\n<b>➣ ʟɪʙʀᴀʀʏ ⋟</b> 𝘱𝘺𝘳𝘰𝘨𝘳𝘢𝘮\n<b>➢ ʟᴀɴɢᴜᴀɢᴇ ⋟</b> 𝘱𝘺𝘵𝘩𝘰𝘯 3\n<b>➣ ᴅᴀᴛᴀʙᴀsᴇ ⋟</b> 𝘮𝘰𝘯𝘨𝘰 𝘥𝘣\n<b>➢ ʙᴏᴛ sᴇʀᴠᴇʀ ⋟</b> 𝘩𝘦𝘳𝘰𝘬𝘶\n<b>➣ ʙᴜɪʟᴅ sᴛᴀᴛs ⋟</b> 𝘷2.0.1 ﹝ʙᴇᴛᴀ﹞\"\"\"\n\n SOURCE_TXT = \"\"\"\n<b>ᴛʜɪꜱ ɪꜱ ᴀɴ ᴏᴘᴇɴ ꜱᴏᴜʀᴄᴇ ᴘʀᴏᴊᴇᴄᴛ.</b>\n\nᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜɪꜱ ʙᴏᴛ ᴀʀᴇ ꜰʀᴇᴇʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ ᴏɴ ᴛʜᴇ ɪɴᴛᴇʀɴᴇᴛ ᴏʀ ᴘᴏꜱᴛᴇᴅ ʙʏ ꜱᴏᴍᴇʙᴏᴅʏ ᴇʟꜱᴇ. ᴊᴜꜱᴛ ꜰᴏʀ ᴇᴀꜱʏ ꜱᴇᴀʀᴄʜɪɴɢ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ɪɴᴅᴇxɪɴɢ ꜰɪʟᴇꜱ ᴡʜɪᴄʜ ᴀʀᴇ ᴀʟʀᴇᴀᴅʏ ᴜᴘʟᴏᴀᴅᴇᴅ ᴏɴ ᴛᴇʟᴇɢʀᴀᴍ. ᴡᴇ ʀᴇꜱᴘᴇᴄᴛ ᴀʟʟ ᴛʜᴇ ᴄᴏᴘʏʀɪɢʜᴛ ʟᴀᴡꜱ ᴀɴᴅ ᴡᴏʀᴋꜱ ɪɴ ᴄᴏᴍᴘʟɪᴀɴᴄᴇ ᴡɪᴛʜ ᴅᴍᴄᴀ ᴀɴᴅ ᴇᴜᴄᴅ. ɪꜰ ᴀɴʏᴛʜɪɴɢ ɪꜱ ᴀɢᴀɪɴꜱᴛ ʟᴀᴡ ᴘʟᴇᴀꜱᴇ ᴄᴏɴᴛᴀᴄᴛ ᴍᴇ ꜱᴏ ᴛʜᴀᴛ ɪᴛ ᴄᴀɴ ʙᴇ ʀᴇᴍᴏᴠᴇᴅ ᴀꜱᴀᴘ. ɪᴛ ɪꜱ ꜰᴏʀʙɪʙʙᴇɴ ᴛᴏ ᴅᴏᴡɴʟᴏᴀᴅ, ꜱᴛʀᴇᴀᴍ, ʀᴇᴘʀᴏᴅᴜᴄᴇ, ᴏʀ ʙʏ ᴀɴʏ ᴍᴇᴀɴꜱ, ꜱʜᴀʀᴇ, ᴏʀ ᴄᴏɴꜱᴜᴍᴇ, ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴄʀᴇᴀᴛᴏʀ ᴏʀ ʟᴇɢᴀʟ ᴄᴏᴘʏʀɪɢʜᴛ ʜᴏʟᴅᴇʀ. ɪꜰ ʏᴏᴜ ʙᴇʟɪᴇᴠᴇ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ᴠɪᴏʟᴀᴛɪɴɢ ʏᴏᴜʀ ɪɴᴛᴇʟʟᴇᴄᴛᴜᴀʟ ᴘʀᴏᴘᴇʀᴛʏ, ᴄᴏɴᴛᴀᴄᴛ ᴛʜᴇ ʀᴇꜱᴘᴇᴄᴛɪᴠᴇ ᴄʜᴀɴɴᴇʟꜱ ꜰᴏʀ ʀᴇᴍᴏᴠᴀʟ. ᴛʜᴇ ʙᴏᴛ ᴅᴏᴇꜱ ɴᴏᴛ ᴏᴡɴ ᴀɴʏ ᴏꜰ ᴛʜᴇꜱᴇ ᴄᴏɴᴛᴇɴᴛꜱ, ɪᴛ ᴏɴʟʏ ɪɴᴅᴇx ᴛʜᴇ ꜰɪʟᴇꜱ ꜰʀᴏᴍ ᴛᴇʟᴇɢʀᴀᴍ.\n\n<b><a href=https://telegram.me/NobiDeveloper>~ ᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ @MovieVillaYT</a></b>\n\"\"\"\n\n MANUELFILTER_TXT = \"\"\"\n<b>{},\n\n~ ʏᴏᴜ ᴄᴀɴ ᴇᴀsɪʟʏ ᴄᴜsᴛᴏᴍɪᴢᴇ ᴛʜɪs ʙᴏᴛ ꜰᴏʀ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n\n~ ᴏɴʟʏ ɢʀᴏᴜᴘ ᴀᴅᴍɪɴ ᴄᴀɴ ᴜsᴇ ᴛʜɪs ᴄᴏᴍᴍᴀɴᴅ ᴀɴᴅ ᴄʜᴀɴɢᴇs sᴇᴛᴛɪɴɢs.\n\n~ ɪᴛ ᴡᴏʀᴋs ᴏɴʟʏ ᴡʜᴇɴ ʏᴏᴜ ᴀʟʀᴇᴀᴅʏ ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n\nᴄᴏᴍᴍᴀɴᴅs ᴀɴᴅ ᴜsᴀɢᴇ -\n\n• /settings - ᴄʜᴀɴɢᴇ sᴇᴛᴛɪɴɢs ᴀs ʏᴏᴜʀ ᴡɪsʜ.</b>\n\"\"\"\n\n GROUP_TXT = \"\"\"\n<b>⍟ ᴄʜᴀɴɴᴇʟs ᴀɴᴅ ɢʀᴏᴜᴘs ᴍᴏᴅᴜʟᴇ ⍟</b>\n\n<b>🍿 ᴍᴏᴠɪᴇꜱ ᴄʜᴀɴɴᴇʟ.\n🗣️ ʙᴏᴛ sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ.\n🚦 ʙᴏᴛ ᴜᴘᴅᴀᴛᴇs ᴄʜᴀɴɴᴇʟ.\n🎬 ᴍᴏᴠɪᴇ ʀᴇǫᴜᴇsᴛɪɴɢ ɢʀᴏᴜᴘ.</b>\"\"\"\n\n BUTTON_TXT = \"\"\"\n<b>💵 ɪ ʀᴇǫᴜᴇsᴛᴇᴅ ᴛᴏ ʏᴏᴜ 💸\n\nᴘʟᴇᴀsᴇ ᴅᴏɴᴀᴛᴇ ᴛʜᴇ ᴅᴇᴠᴇʟᴏᴘᴇʀ ꜰᴏʀ ᴋᴇᴇᴘɪɴɢ ᴛʜᴇ sᴇʀᴠɪᴄᴇ ᴀʟɪᴠᴇ & ᴋᴇᴇᴘ ʙʀɪɴɢɪɴɢ ᴍᴏʀᴇ ɴᴇᴡ ꜰᴇᴀᴛᴜʀᴇs ꜰᴏʀ ʏᴏᴜ....</b>\n\n𝐘𝐨𝐮 𝐂𝐚𝐧 𝐃𝐨𝐧𝐚𝐭𝐞 𝐀𝐧𝐲 𝐀𝐦𝐨𝐮𝐧𝐭 𝐘𝐨𝐮 𝐇𝐚𝐯𝐞 💷\n\n<b>᚜ ᴘᴀʏᴍᴇɴᴛ ᴍᴇᴛʜᴏᴅs ᚛</b>\n\n💵 <a href='https://telegra.ph/SUPPORT-12-22-2'>𝗚𝗼𝗼𝗴𝗹𝗲 𝗣𝗮𝘆</a>\n💸 <a href='https://telegra.ph/SUPPORT-12-22-2'>𝗣𝗮𝘆𝘁𝗺</a>\n💶 <a href='https://telegra.ph/SUPPORT-12-22-2'>𝗣𝗵𝗼𝗻𝗲𝗣𝗲</a>\n\n𝐂𝐨𝐧𝐭𝐚𝐜𝐭 𝐌𝐞 𝐅𝐨𝐫 𝐊𝐧𝐨𝐰 𝐀𝐛𝐨𝐮𝐭 𝐓𝐡𝐞 𝐏𝐚𝐲𝐦𝐞𝐧𝐭 𝐈𝐧𝐟𝐨\n\n<b>ᴄʟɪᴄᴋ ʜᴇʀᴇ - <a href='https://telegram.me/NobiDeveloperr'>ʙᴏss</a>\nᴄʟɪᴄᴋ ʜᴇʀᴇ - <a href='https://telegram.me/NobiDeveloperr'>ʙᴏss</a></b>\"\"\"\n\n AUTOFILTER_TXT = \"\"\"ʜᴇʟᴘ: <b>ᴀᴜᴛᴏ ꜰɪʟᴛᴇʀ</b>\n<b>ɴᴏᴛᴇ: Fɪʟᴇ Iɴᴅᴇx</b>\n1. ᴍᴀᴋᴇ ᴍᴇ ᴛʜᴇ ᴀᴅᴍɪɴ ᴏꜰ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ɪꜰ ɪᴛ'ꜱ ᴘʀɪᴠᴀᴛᴇ.\n2. ᴍᴀᴋᴇ ꜱᴜʀᴇ ᴛʜᴀᴛ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ᴅᴏᴇꜱ ɴᴏᴛ ᴄᴏɴᴛᴀɪɴꜱ ᴄᴀᴍʀɪᴘꜱ, ᴘᴏʀɴ ᴀɴᴅ ꜰᴀᴋᴇ ꜰɪʟᴇꜱ.\n3. ꜰᴏʀᴡᴀʀᴅ ᴛʜᴇ ʟᴀꜱᴛ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴍᴇ ᴡɪᴛʜ Qᴜᴏᴛᴇꜱ. ɪ'ʟʟ ᴀᴅᴅ ᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜᴀᴛ ᴄʜᴀɴɴᴇʟ ᴛᴏ ᴍʏ ᴅʙ.\n\n<b>Nᴏᴛᴇ: AᴜᴛᴏFɪʟᴛᴇʀ</b>\n1. Aᴅᴅ ᴛʜᴇ ʙᴏᴛ ᴀs ᴀᴅᴍɪɴ ᴏɴ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n2. Usᴇ /connect ᴀɴᴅ ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴛᴏ ᴛʜᴇ ʙᴏᴛ.\n3. Usᴇ /settings ᴏɴ ʙᴏᴛ's PM ᴀɴᴅ ᴛᴜʀɴ ᴏɴ AᴜᴛᴏFɪʟᴛᴇʀ ᴏɴ ᴛʜᴇ sᴇᴛᴛɪɴɢs ᴍᴇɴᴜ.\"\"\"\n\n CONNECTION_TXT = \"\"\"ʜᴇʟᴘ: <b>ᴄᴏɴɴᴇᴄᴛɪᴏɴꜱ</b>\n- ᴜꜱᴇᴅ ᴛᴏ ᴄᴏɴɴᴇᴄᴛ ʙᴏᴛ ᴛᴏ ᴘᴍ ꜰᴏʀ ᴍᴀɴᴀɢɪɴɢ ꜰɪʟᴛᴇʀꜱ \n- ɪᴛ ʜᴇʟᴘꜱ ᴛᴏ ᴀᴠᴏɪᴅ ꜱᴘᴀᴍᴍɪɴɢ ɪɴ ɢʀᴏᴜᴘꜱ.\n<b>ɴᴏᴛᴇ:</b>\n1. ᴏɴʟʏ ᴀᴅᴍɪɴꜱ ᴄᴀɴ ᴀᴅᴅ ᴀ ᴄᴏɴɴᴇᴄᴛɪᴏɴ.\n2. 
ꜱᴇɴᴅ <code>/ᴄᴏɴɴᴇᴄᴛ</code> ꜰᴏʀ ᴄᴏɴɴᴇᴄᴛɪɴɢ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ᴘᴍ\nCᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ:\n• /connect - <code>ᴄᴏɴɴᴇᴄᴛ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴄʜᴀᴛ ᴛᴏ ʏᴏᴜʀ ᴘᴍ</code>\n• /disconnect - <code>ᴅɪꜱᴄᴏɴɴᴇᴄᴛ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ</code>\n• /connections - <code>ʟɪꜱᴛ ᴀʟʟ ʏᴏᴜʀ ᴄᴏɴɴᴇᴄᴛɪᴏɴꜱ</code>\"\"\"\n\n EXTRAMOD_TXT = \"\"\"ʜᴇʟᴘ: Exᴛʀᴀ Mᴏᴅᴜʟᴇs\n<b>ɴᴏᴛᴇ:</b>\nᴛʜᴇꜱᴇ ᴀʀᴇ ᴛʜᴇ ᴇxᴛʀᴀ ꜰᴇᴀᴛᴜʀᴇꜱ ᴏꜰ ᴛʜɪꜱ ʙᴏᴛ\nCᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ:\n• /id - <code>ɢᴇᴛ ɪᴅ ᴏꜰ ᴀ ꜱᴘᴇᴄɪꜰɪᴇᴅ ᴜꜱᴇʀ.</code>\n• /info - <code>ɢᴇᴛ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ᴀʙᴏᴜᴛ ᴀ ᴜꜱᴇʀ.</code>\n• /imdb - <code>ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ɪᴍᴅʙ ꜱᴏᴜʀᴄᴇ.</code>\n• /search - <code>ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ᴠᴀʀɪᴏᴜꜱ ꜱᴏᴜʀᴄᴇꜱ.</code>\"\"\"\n\n ADMIN_TXT = \"\"\"ʜᴇʟᴘ: Aᴅᴍɪɴ Mᴏᴅs\n<b>ɴᴏᴛᴇ:</b>\nTʜɪs Mᴏᴅᴜʟᴇ Oɴʟʏ Wᴏʀᴋs Fᴏʀ Mʏ Aᴅᴍɪɴs\nCᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ:\n• /logs - <code>ᴛᴏ ɢᴇᴛ ᴛʜᴇ ʀᴇᴄᴇɴᴛ ᴇʀʀᴏʀꜱ</code>\n• /stats - <code>ᴛᴏ ɢᴇᴛ ꜱᴛᴀᴛᴜꜱ ᴏꜰ ꜰɪʟᴇꜱ ɪɴ ᴅʙ. [Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</code>\n• /delete - <code>ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴇ ꜰʀᴏᴍ ᴅʙ.</code>\n• /users - <code>ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴜꜱᴇʀꜱ ᴀɴᴅ ɪᴅꜱ.</code>\n• /chats - <code>ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴄʜᴀᴛꜱ ᴀɴᴅ ɪᴅꜱ</code>\n• /leave - <code>ᴛᴏ ʟᴇᴀᴠᴇ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ.</code>\n• /disable - <code>ᴛᴏ ᴅɪꜱᴀʙʟᴇ ᴀ ᴄʜᴀᴛ.</code>\n• /ban - <code>ᴛᴏ ʙᴀɴ ᴀ ᴜꜱᴇʀ.</code>\n• /unban - <code>ᴛᴏ ᴜɴʙᴀɴ ᴀ ᴜꜱᴇʀ.</code>\n• /channel - <code>ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴛᴏᴛᴀʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟꜱ</code>\n• /broadcast - <code>ᴛᴏ ʙʀᴏᴀᴅᴄᴀꜱᴛ ᴀ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴀʟʟ ᴜꜱᴇʀꜱ</code>\n• /grp_broadcast - <code>Tᴏ ʙʀᴏᴀᴅᴄᴀsᴛ ᴀ ᴍᴇssᴀɢᴇ ᴛᴏ ᴀʟʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ɢʀᴏᴜᴘs.</code>\n• /gfilter - <code>ᴛᴏ ᴀᴅᴅ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs</code>\n• /gfilters - <code>ᴛᴏ ᴠɪᴇᴡ ʟɪsᴛ ᴏғ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs</code>\n• /delg - <code>ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ sᴘᴇᴄɪғɪᴄ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ</code>\n• /request - <code>Tᴏ sᴇɴᴅ ᴀ Mᴏᴠɪᴇ/Sᴇʀɪᴇs ʀᴇᴏ̨ᴜᴇsᴛ ᴛᴏ ʙᴏᴛ ᴀᴅᴍɪɴs. Oɴʟʏ ᴡᴏʀᴋs ᴏɴ sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ. [Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</code>\n• /delallg - <code>Tᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ Gғɪʟᴛᴇʀs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.</code>\n• /deletefiles - <code>Tᴏ ᴅᴇʟᴇᴛᴇ CᴀᴍRɪᴘ ᴀɴᴅ PʀᴇDVD Fɪʟᴇs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.</code>\"\"\"\n\n STATUS_TXT = \"\"\"<b>📂 ᴛᴏᴛᴀʟ ꜰɪʟᴇs: <code>{}</code>\n👤 ᴛᴏᴛᴀʟ ᴜsᴇʀs: <code>{}</code>\n♻️ ᴛᴏᴛᴀʟ ᴄʜᴀᴛs: <code>{}</code>\n🗃️ ᴜsᴇᴅ sᴛᴏʀᴀɢᴇ: <code>{}</code>\n🆓 ꜰʀᴇᴇ sᴛᴏʀᴀɢᴇ: <code>{}</code></b>\"\"\"\n\n LOG_TEXT_G = \"\"\"#𝐍𝐞𝐰𝐆𝐫𝐨𝐮𝐩\n\n<b>᚛› 𝐆𝐫𝐨𝐮𝐩 ⪼ {}(<code>{}</code>)</b>\n<b>᚛› 𝐓𝐨𝐭𝐚𝐥 𝐌𝐞𝐦𝐛𝐞𝐫𝐬 ⪼ <code>{}</code></b>\n<b>᚛› 𝐀𝐝𝐝𝐞𝐝 𝐁𝐲 ⪼ {}</b>\n\"\"\"\n\n LOG_TEXT_P = \"\"\"#𝐍𝐞𝐰𝐔𝐬𝐞𝐫\n\n<b>᚛› 𝐈𝐃 - <code>{}</code></b>\n<b>᚛› 𝐍𝐚𝐦𝐞 - {}</b>\n\"\"\"\n\n ALRT_TXT = \"\"\"{},\nᴄʜᴇᴄᴋ ʏᴏᴜʀ ᴏᴡɴ ʀᴇǫᴜᴇ𝗌ᴛ 😤\n\"\"\"\n\n OLD_ALRT_TXT =\"\"\"{},\n\nʏᴏᴜ ᴀʀᴇ ᴜꜱɪɴɢ ᴍʏ ᴏʟᴅ ᴍᴇꜱꜱᴀɢᴇ,\n\nꜱᴇɴᴅ ᴛʜᴇ ʀᴇǫᴜᴇ𝗌ᴛ ᴀɢᴀɪɴ 😊\n\"\"\"\n\n CUDNT_FND = \"\"\"<b>{},</b>\n\n𝗜 𝗰𝗼𝘂𝗹𝗱𝗻'𝘁 𝗳𝗶𝗻𝗱 𝗮𝗻𝘆𝘁𝗵𝗶𝗻𝗴 𝗿𝗲𝗹𝗮𝘁𝗲𝗱 𝘁𝗼 𝘁𝗵𝗮𝘁 𝗱𝗶𝗱 𝘆𝗼𝘂 𝗺𝗲𝗮𝗻 𝗮𝗻𝘆 𝗼𝗻𝗲 𝗼𝗳 𝘁𝗵𝗲𝘀𝗲 ?? 👇\"\"\"\n\n I_CUDNT = \"\"\"<b>{},</b>\n\n𝗜 𝗰𝗼𝘂𝗹𝗱𝗻'𝘁 𝗳𝗶𝗻𝗱 𝗮𝗻𝘆 𝗺𝗼𝘃𝗶𝗲 𝗼𝗿 𝘀𝗲𝗿𝗶𝗲𝘀 𝗶𝗻 𝘁𝗵𝗮𝘁 𝗻𝗮𝗺𝗲.. 
😐\"\"\"\n\n I_CUD_NT = \"\"\"ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏ ᴍᴏᴠɪᴇ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}.\nᴘʟᴇᴀꜱᴇ ᴄʜᴇᴄᴋ ᴛʜᴇ ꜱᴘᴇʟʟɪɴɢ ᴏɴ ɢᴏᴏɢʟᴇ ᴏʀ ɪᴍᴅʙ...\"\"\"\n\n MVE_NT_FND = \"\"\"<b>ᴍᴏᴠɪᴇ ɴᴏᴛ ꜰᴏᴜɴᴅ...\n\n<u>ʀᴇᴀꜱᴏɴꜱ:</u></b>\n\n𝟷) ꜱᴘᴇʟʟɪɴɢ ᴍɪꜱᴛᴀᴋᴇ\n\n𝟸) ᴏᴛᴛ ᴏʀ ᴅᴠᴅ ɴᴏᴛ ʀᴇʟᴇᴀꜱᴇᴅ\n\n𝟹) ɴᴏᴛ ᴀᴠᴀɪʟᴀʙʟᴇ ɪɴ ᴅᴀᴛᴀʙᴀꜱᴇ\n\n<b><a href=https://telegram.me/NobiDeveloperr>~ ʀᴇǫᴜᴇ𝗌ᴛ ᴛᴏ ᴏᴡɴᴇʀ</a></b>\n\"\"\"\n\n TOP_ALRT_MSG = \"\"\"ꜱᴇᴀʀᴄʜɪɴɢ ɪɴ ᴅᴀᴛᴀʙᴀꜱᴇ...\"\"\"\n\n MELCOW_ENG = \"\"\"<b>{},\n\n📿 ᴡᴇʟᴄᴏᴍᴇ ᴛᴏ ᴏᴜʀ ɢʀᴏᴜᴘ {}\n\n🚬 ᴛʜɪs ɪs ᴀ ᴍᴏᴠɪᴇ ɢʀᴏᴜᴘ\n\n⏳ ᴀʟʟ ᴄᴀᴛᴇɢᴏʀɪᴇs ᴏꜰ ᴍᴏᴠɪᴇs ᴀᴠᴀɪʟᴀʙʟᴇ ʜᴇʀᴇ\n\n🧨 ᴊᴜsᴛ ᴛʏᴘᴇ ᴛʜᴇ ᴍᴏᴠɪᴇ ɴᴀᴍᴇ\n\n🤖 ʙᴏᴛ ᴡɪʟʟ sᴇɴᴅ ʏᴏᴜʀ ᴍᴏᴠɪᴇ\n\n☎️ ʀᴇᴀᴅ ɢʀᴏᴜᴘ ʀᴜʟᴇs ᴛᴏ ᴋɴᴏᴡ ᴍᴏʀᴇ...</b>\"\"\"\n\n SHORTLINK_INFO = \"\"\"\n<b>──────「 <a href='https://telegram.me/NobiDeveloper'>ᴇᴀʀɴ ᴍᴏɴᴇʏ</a> 」──────\n\n➥ ɴᴏᴡ ʏᴏᴜ ᴄᴀɴ ᴀʟsᴏ ᴇᴀʀɴ ʟᴏᴛs ᴏꜰ ᴍᴏɴᴇʏ ꜰʀᴏᴍ ᴛʜɪꜱ ʙᴏᴛ.\n\n›› sᴛᴇᴘ 𝟷 : ʏᴏᴜ ᴍᴜsᴛ ʜᴀᴠᴇ ᴀᴛʟᴇᴀsᴛ ᴏɴᴇ ɢʀᴏᴜᴘ ᴡɪᴛʜ ᴍɪɴɪᴍᴜᴍ 𝟹𝟶𝟶 ᴍᴇᴍʙᴇʀs.\n\n›› sᴛᴇᴘ 𝟸 : ᴍᴀᴋᴇ ᴀᴄᴄᴏᴜɴᴛ ᴏɴ <a href='https://tnshort.net/ref/devilofficial'>ᴛɴʟɪɴᴋ</a> ᴏʀ <a href='https://onepagelink.in/ref/Nobita'>ᴏɴᴇᴘᴀɢᴇʟɪɴᴋ</a>. [ ʏᴏᴜ ᴄᴀɴ ᴀʟsᴏ ᴜsᴇ ᴏᴛʜᴇʀ sʜᴏʀᴛɴᴇʀ ᴡᴇʙsɪᴛᴇ ]\n\n›› sᴛᴇᴘ 𝟹 : ꜰᴏʟʟᴏᴡ ᴛʜᴇsᴇ <a href='https://telegram.me/NobiDeveloper/1063'>ɪɴꜱᴛʀᴜᴄᴛɪᴏɴꜱ</a>.\n\n➥ ᴛʜɪꜱ ʙᴏᴛ ꜰʀᴇᴇ ꜰᴏʀ ᴀʟʟ ʏᴏᴜ ᴄᴀɴ ᴜꜱᴇ ᴛʜɪꜱ ʙᴏᴛ ɪɴ ʏᴏᴜʀ ɢʀᴏᴜᴘs ꜰʀᴇᴇ ᴏꜰ ᴄᴏꜱᴛ.</b>\"\"\"\n\n REQINFO = \"\"\"\n⚠ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ⚠\n\nᴀꜰᴛᴇʀ 5 ᴍɪɴᴜᴛᴇꜱ ᴛʜɪꜱ ᴍᴇꜱꜱᴀɢᴇ ᴡɪʟʟ ʙᴇ ᴀᴜᴛᴏᴍᴀᴛɪᴄᴀʟʟʏ ᴅᴇʟᴇᴛᴇᴅ\n\nɪꜰ ʏᴏᴜ ᴅᴏ ɴᴏᴛ ꜱᴇᴇ ᴛʜᴇ ʀᴇǫᴜᴇsᴛᴇᴅ ᴍᴏᴠɪᴇ / sᴇʀɪᴇs ꜰɪʟᴇ, ʟᴏᴏᴋ ᴀᴛ ᴛʜᴇ ɴᴇxᴛ ᴘᴀɢᴇ\"\"\"\n\n SELECT = \"\"\"\nMOVIES ➢ Sᴇʟᴇᴄᴛ \"Lᴀɴɢᴜᴀɢᴇs\"\n\nSERIES ➢ Sᴇʟᴇᴄᴛ \"Sᴇᴀsᴏɴs\"\n\nTɪᴘ: Sᴇʟᴇᴄᴛ \"Lᴀɴɢᴜᴀɢᴇs\" ᴏʀ \"Sᴇᴀsᴏɴs\" Bᴜᴛᴛᴏɴ ᴀɴᴅ Cʟɪᴄᴋ \"Sᴇɴᴅ Aʟʟ\" Tᴏ ɢᴇᴛ Aʟʟ Fɪʟᴇ Lɪɴᴋs ɪɴ ᴀ Sɪɴɢʟᴇ ᴄʟɪᴄᴋ\"\"\"\n\n SINFO = \"\"\"\n▣ ᴛɪᴘs ▣\n\n☆ ᴛʏᴘᴇ ᴄᴏʀʀᴇᴄᴛ sᴘᴇʟʟɪɴɢ (ɢᴏᴏɢʟᴇ)\n\n☆ ɪꜰ ʏᴏᴜ ɴᴏᴛ ɢᴇᴛ ʏᴏᴜʀ ꜰɪʟᴇ ɪɴ ᴛʜɪꜱ ᴘᴀɢᴇ ᴛʜᴇɴ ᴄʟɪᴄᴋ ᴏɴ ɴᴇxᴛ ʙᴜᴛᴛᴏɴ\n\n☆ ᴄᴏɴᴛɪɴᴜᴇ ᴛʜɪs ᴍᴇᴛʜᴏᴅ ᴛᴏ ɢᴇᴛᴛɪɴɢ ʏᴏᴜ ꜰɪʟᴇ\n\n❤️‍🔥 ᴘᴏᴡᴇʀᴇᴅ ʙʏ @NobiDeveloper\n\"\"\"\n\n NORSLTS = \"\"\"\n★ #𝗡𝗼𝗥𝗲𝘀𝘂𝗹𝘁𝘀 ★\n\n𝗜𝗗 <b>: {}</b>\n𝗡𝗮𝗺𝗲 <b>: {}</b>\n𝗠𝗲𝘀𝘀𝗮𝗴𝗲 <b>: {}</b>\"\"\"\n\n CAPTION = \"\"\"\n[{file_name}](https://telegram.me/NobiDeveloper)\n\n<b>•────•────────•────•\n📌 ʀᴇǫᴜᴇsᴛ ɢʀᴏᴜᴘ​ : [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://telegram.me/AllRequestGroups)\n🎬 ᴍᴏᴠɪᴇs ᴄʜᴀɴɴᴇʟ​ : [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://telegram.me/MovieVillaYT)\n•────•────────•────•\n\n©️ ᴘᴏᴡᴇʀᴇᴅ ʙʏ : [ᴍᴏᴠɪᴇ ᴠɪʟʟᴀ](https://youtube.com/@NobiDeveloper)</b>\"\"\"\n\n IMDB_TEMPLATE_TXT = \"\"\"\n<b>{title}</b>\n\n⭐️<b>{rating}</b> | ⏰ <b>{runtime}</b> | 📆 <b>{release_date}</b>\n\n● <b>{genres}</b>\n● <b>{languages}</b>\n\n📖 sᴛᴏʀʏ : <b>{plot}</b> \n\n© {message.chat.title}\n\"\"\"\n \n ALL_FILTERS = \"\"\"\n<b>Hᴇʏ {}, Tʜᴇsᴇ ᴀʀᴇ ᴍʏ ᴛʜʀᴇᴇ ᴛʏᴘᴇs ᴏғ ғɪʟᴛᴇʀs.</b>\"\"\"\n \n GFILTER_TXT = \"\"\"\n<b>Wᴇʟᴄᴏᴍᴇ ᴛᴏ Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs. 
Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs ᴀʀᴇ ᴛʜᴇ ғɪʟᴛᴇʀs sᴇᴛ ʙʏ ʙᴏᴛ ᴀᴅᴍɪɴs ᴡʜɪᴄʜ ᴡɪʟʟ ᴡᴏʀᴋ ᴏɴ ᴀʟʟ ɢʀᴏᴜᴘs.</b>\n \nAᴠᴀɪʟᴀʙʟᴇ ᴄᴏᴍᴍᴀɴᴅs:\n• /gfilter - <code>Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.</code>\n• /gfilters - <code>Tᴏ ᴠɪᴇᴡ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs.</code>\n• /delg - <code>Tᴏ ᴅᴇʟᴇᴛᴇ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.</code>\n• /delallg - <code>ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ ɢʟᴏʙᴀʟ ꜰɪʟᴛᴇʀꜱ.</code>\"\"\"\n \n FILE_STORE_TXT = \"\"\"\n<b>Fɪʟᴇ sᴛᴏʀᴇ ɪs ᴛʜᴇ ғᴇᴀᴛᴜʀᴇ ᴡʜɪᴄʜ ᴡɪʟʟ ᴄʀᴇᴀᴛᴇ ᴀ sʜᴀʀᴇᴀʙʟᴇ ʟɪɴᴋ ᴏғ ᴀ sɪɴɢʟᴇ ᴏʀ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.</b>\n\nAᴠᴀɪʟᴀʙʟᴇ ᴄᴏᴍᴍᴀɴᴅs:\n• /batch - <code>Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ ʙᴀᴛᴄʜ ʟɪɴᴋ ᴏғ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.</code>\n• /link - <code>Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ sɪɴɢʟᴇ ғɪʟᴇ sᴛᴏʀᴇ ʟɪɴᴋ.</code>\n• /pbatch - <code>Jᴜsᴛ ʟɪᴋᴇ /batch, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇs ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴs.</code>\n• /plink - <code>Jᴜsᴛ ʟɪᴋᴇ /link, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇ ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴ.</code>\"\"\"\n\n RESTART_TXT = \"\"\"\n<b>Bᴏᴛ Rᴇsᴛᴀʀᴛᴇᴅ !\n\n📅 Dᴀᴛᴇ : <code>{}</code>\n⏰ Tɪᴍᴇ : <code>{}</code>\n🌐 Tɪᴍᴇᴢᴏɴᴇ : <code>Asia/Kolkata</code>\n🛠️ Bᴜɪʟᴅ Sᴛᴀᴛᴜs: <code>v2.7.1 [ Sᴛᴀʙʟᴇ ]</code></b>\n\"\"\"\n\n LOGO = \"\"\"\n𝑺𝒕𝒂𝒓𝒕𝒊𝒏𝒈.......🥵\"\"\"" } ]
from pyrogram import Client, filters, enums from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK from database.users_chats_db import db from database.ia_filterdb import Media from utils import get_size, temp, get_settings from Script import script from pyrogram.errors import ChatAdminRequired import asyncio
14,685
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous"
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous"
await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j))
12
2023-11-28 13:36:56+00:00
24k
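For readability, the handler in the record above can be read together with the single line it is meant to continue into. The sketch below simply splices that continuation back into the function; imports are as given in the record's import line, the indentation of the last statement is inferred from context, and nothing else is added:

@Client.on_message(filters.new_chat_members & filters.group)
async def save_group(bot, message):
    r_j_check = [u.id for u in message.new_chat_members]
    if temp.ME in r_j_check:
        if not await db.get_chat(message.chat.id):
            total = await bot.get_chat_members_count(message.chat.id)
            r_j = message.from_user.mention if message.from_user else "Anonymous"
            await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j))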
chenxx89/BFRffusion
models/models.py
[ { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "UNetModel", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param num_classes: if specified (as an int), then this model will be\n class-conditional with `num_classes` classes.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. 
Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n super().__init__()\n if use_spatial_transformer:\n assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'\n\n if context_dim is not None:\n assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'\n from omegaconf.listconfig import ListConfig\n if type(context_dim) == ListConfig:\n context_dim = list(context_dim)\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n if num_heads == -1:\n assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'\n\n if num_head_channels == -1:\n assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n if isinstance(num_res_blocks, int):\n self.num_res_blocks = len(channel_mult) * [num_res_blocks]\n else:\n if len(num_res_blocks) != len(channel_mult):\n raise ValueError(\"provide num_res_blocks either as an int (globally constant) or \"\n \"as a list/tuple (per-level) with the same length as channel_mult\")\n self.num_res_blocks = num_res_blocks\n if disable_self_attentions is not None:\n # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not\n assert len(disable_self_attentions) == len(channel_mult)\n if num_attention_blocks is not None:\n assert len(num_attention_blocks) == len(self.num_res_blocks)\n assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))\n print(f\"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
\"\n f\"This option has LESS priority than attention_resolutions {attention_resolutions}, \"\n f\"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, \"\n f\"attention will still not be set.\")\n\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.num_classes = num_classes\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n self.predict_codebook_ids = n_embed is not None\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n if self.num_classes is not None:\n if isinstance(self.num_classes, int):\n self.label_emb = nn.Embedding(num_classes, time_embed_dim)\n elif self.num_classes == \"continuous\":\n print(\"setting up linear c_adm embedding layer\")\n self.label_emb = nn.Linear(1, time_embed_dim)\n else:\n raise ValueError()\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for nr in range(self.num_res_blocks[level]):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n 
dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(self.num_res_blocks[level] + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or i < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n if level and i == self.num_res_blocks[level]:\n out_ch = ch\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)\n )\n ds //= 2\n self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n if self.predict_codebook_ids:\n self.id_predictor = nn.Sequential(\n normalization(ch),\n conv_nd(dims, model_channels, n_embed, 1),\n #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits\n )\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] 
Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param context: conditioning plugged in via crossattn\n :param y: an [N] Tensor of labels, if class-conditional.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n assert (y is not None) == (\n self.num_classes is not None\n ), \"must specify y if and only if the model is class-conditional\"\n hs = []\n t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)\n emb = self.time_embed(t_emb)\n\n if self.num_classes is not None:\n assert y.shape[0] == x.shape[0]\n emb = emb + self.label_emb(y)\n\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, context)\n hs.append(h)\n h = self.middle_block(h, emb, context)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, context)\n h = h.type(x.dtype)\n if self.predict_codebook_ids:\n return self.id_predictor(h)\n else:\n return self.out(h)" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. / z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = 
encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting 
= weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n\n if self.model.conditioning_key is not None and not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', \"txt\"]:\n xc = batch[cond_key]\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. 
/ self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n # if self.cond_stage_trainable:\n # c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n 
return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n 
total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = 
{\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. 
Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "instantiate_from_config", "path": "data/dataset_instantiate.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(config.get(\"params\", dict()))" }, { "identifier": "calculate_psnr_ssim", "path": "metrics/metrics_all.py", "snippet": "def calculate_psnr_ssim(gt_path, restored_path, test_y_channel = False, crop_border = 0, suffix = '', correct_mean_var = False, show_details =False):\n \"\"\"\n Calculate PSNR and SSIM for images.\n gt_path: Path to gt (Ground-Truth)\n restored_path: Path to restored images\n test_y_channel: If True, test Y channel (In MatLab YCbCr format). If False, test RGB channels.\n crop_border: Crop border for each side\n suffix: Suffix for restored images\n \"\"\"\n print(\"Calculate PSNR and SSIM for images\")\n psnr_all = []\n ssim_all = []\n img_list_gt = sorted(list(scandir(gt_path, recursive=True, full_path=True)))\n img_list_restored = sorted(list(scandir(restored_path, recursive=True, full_path=True)))\n\n if test_y_channel:\n print('Testing Y channel.')\n else:\n print('Testing RGB channels.')\n\n for i, img_path in tqdm(enumerate(img_list_gt)):\n basename, ext = osp.splitext(osp.basename(img_path))\n img_gt = cv2.imread(img_path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n if suffix == '':\n img_path_restored = img_list_restored[i]\n else:\n img_path_restored = osp.join(restored_path, basename + suffix + ext)\n img_restored = cv2.imread(img_path_restored, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n # img_restored = cv2.imread(img_path_restored, cv2.IMREAD_COLOR).astype(np.float32) / 255.\n img_restored\n if correct_mean_var:\n mean_l = []\n std_l = []\n for j in range(3):\n mean_l.append(np.mean(img_gt[:, :, j]))\n std_l.append(np.std(img_gt[:, :, j]))\n for j in range(3):\n # correct twice\n mean = np.mean(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] - mean + mean_l[j]\n std = np.std(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] / std * std_l[j]\n\n mean = np.mean(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] - mean + mean_l[j]\n std = np.std(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] / std * std_l[j]\n\n if test_y_channel and img_gt.ndim == 3 and img_gt.shape[2] == 3:\n img_gt = bgr2ycbcr(img_gt, y_only=True)\n img_restored = bgr2ycbcr(img_restored, y_only=True)\n\n # calculate PSNR and SSIM\n psnr = calculate_psnr(img_gt * 255, img_restored * 255, crop_border=crop_border, input_order='HWC')\n ssim = calculate_ssim(img_gt * 255, img_restored * 255, crop_border=crop_border, input_order='HWC')\n if show_details:\n print(f'{basename + suffix + ext:25}. 
\\tPSNR: {psnr:.6f} dB, \\tSSIM: {ssim:.6f}')\n psnr_all.append(psnr)\n ssim_all.append(ssim)\n Average_psnr = sum(psnr_all) / len(psnr_all)\n Average_ssim = sum(ssim_all) / len(ssim_all)\n print(f'PSNR: {Average_psnr:.6f} dB, SSIM: {Average_ssim:.6f}')\n return Average_psnr, Average_ssim" }, { "identifier": "calculate_lpips", "path": "metrics/metrics_all.py", "snippet": "def calculate_lpips(gt_path, restored_path, suffix = '', show_details =False):\n \"\"\"\n Calculate LPIPS for images.\n gt_path: Path to gt (Ground-Truth)\n restored_path: Path to restored images\n suffix: Suffix for restored images\n \"\"\"\n print(\"Calculate LPIPS for images\")\n loss_fn_vgg = lpips.LPIPS(net='vgg').cuda() # RGB, normalized to [-1,1]\n lpips_all = []\n img_list = sorted(glob.glob(osp.join(gt_path, '*')))\n img_list_restored = sorted(list(scandir(restored_path, recursive=True, full_path=True)))\n\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n for i, img_path in tqdm(enumerate(img_list)):\n basename, ext = osp.splitext(osp.basename(img_path))\n img_gt = cv2.imread(img_path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n\n if suffix == '':\n img_path_restored = img_list_restored[i]\n else:\n img_path_restored = osp.join(restored_path, basename + suffix + ext)\n img_restored = cv2.imread(img_path_restored, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255. \n # img_restored = cv2.imread(img_path_restored, cv2.IMREAD_COLOR).astype(np.float32) / 255. \n\n img_gt, img_restored = img2tensor([img_gt, img_restored], bgr2rgb=True, float32=True)\n # norm to [-1, 1]\n normalize(img_gt, mean, std, inplace=True)\n normalize(img_restored, mean, std, inplace=True)\n\n # calculate lpips\n lpips_val = loss_fn_vgg(img_restored.unsqueeze(0).cuda(), img_gt.unsqueeze(0).cuda())\n lpips_val = lpips_val.cpu().item()\n if show_details:\n print(f'{i+1:3d}: {basename:25}. \\tLPIPS: {lpips_val:.6f}.')\n lpips_all.append(lpips_val)\n Average_lpips = sum(lpips_all) / len(lpips_all)\n print(f'LPIPS: {Average_lpips:.6f}')\n return Average_lpips" }, { "identifier": "calculate_NIQE", "path": "metrics/metrics_all.py", "snippet": "def calculate_NIQE(restored_path, crop_border = 0, show_details =False):\n \"\"\"\n Calculate NIQE for images.\n restored_path: Path to restored images\n crop_border: Crop border for each side\n \"\"\"\n print(\"Calculate NIQE for images\")\n niqe_all = []\n img_list = sorted(scandir(restored_path, recursive=True, full_path=True))\n\n for i, img_path in tqdm(enumerate(img_list)):\n basename, _ = os.path.splitext(os.path.basename(img_path))\n img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=RuntimeWarning)\n niqe_score = calculate_niqe(img, crop_border, input_order='HWC', convert_to='y')\n if show_details:\n print(f'{i+1:3d}: {basename:25}. 
\\tNIQE: {niqe_score:.6f}')\n niqe_all.append(niqe_score)\n Average_niqe = sum(niqe_all) / len(niqe_all)\n print(f'NIQE: {Average_niqe:.6f}')\n return Average_niqe " }, { "identifier": "calculate_fid_folder", "path": "metrics/metrics_all.py", "snippet": "def calculate_fid_folder(restored_path):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n fid_stats = ''\n batch_size = 64\n num_sample = 50000\n num_workers = 4\n backend = 'disk'\n\n # inception model\n inception = load_patched_inception_v3(device)\n\n # create dataset\n opt = {}\n opt['name'] = 'SingleImageDataset'\n opt['type'] = 'SingleImageDataset'\n opt['dataroot_lq'] = restored_path\n opt['io_backend'] = dict(type=backend)\n opt['mean'] = [0.5, 0.5, 0.5]\n opt['std'] = [0.5, 0.5, 0.5]\n dataset = build_dataset(opt)\n\n # create dataloader\n data_loader = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n sampler=None,\n drop_last=False)\n num_sample = min(num_sample, len(dataset))\n total_batch = math.ceil(num_sample / batch_size)\n\n def data_generator(data_loader, total_batch):\n for idx, data in enumerate(data_loader):\n if idx >= total_batch:\n break\n else:\n yield data['lq']\n\n features = extract_inception_features(data_generator(data_loader, total_batch), inception, total_batch, device)\n features = features.numpy()\n total_len = features.shape[0]\n features = features[:num_sample]\n print(f'Extracted {total_len} features, use the first {features.shape[0]} features to calculate stats.')\n\n sample_mean = np.mean(features, 0)\n sample_cov = np.cov(features, rowvar=False)\n\n # load the dataset stats\n stats = torch.load(fid_stats)\n real_mean = stats['mean']\n real_cov = stats['cov']\n\n # calculate FID metric\n fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)\n print('fid:', fid)\n return fid" } ]
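The context entries above include the evaluation helpers from metrics/metrics_all.py (calculate_psnr_ssim, calculate_lpips, calculate_NIQE). A minimal usage sketch, assuming those helpers are importable from that module as shown; the directory paths below are hypothetical placeholders, and the LPIPS helper expects a CUDA device in this codebase:

# Illustrative sketch only: paths are hypothetical; helpers are assumed importable
# from metrics/metrics_all.py exactly as defined in the snippets above.
from metrics.metrics_all import calculate_psnr_ssim, calculate_lpips, calculate_NIQE

gt_dir = "results/gt"        # ground-truth frames (hypothetical path)
restored_dir = "results/sr"  # restored frames (hypothetical path)

# Each helper scans the two folders, prints the per-set average, and returns it.
psnr, ssim = calculate_psnr_ssim(gt_dir, restored_dir, test_y_channel=True, crop_border=4)
lpips_val = calculate_lpips(gt_dir, restored_dir)   # loads a VGG LPIPS model on CUDA
niqe_val = calculate_NIQE(restored_dir, crop_border=4)
print(f"PSNR {psnr:.2f} dB | SSIM {ssim:.4f} | LPIPS {lpips_val:.4f} | NIQE {niqe_val:.4f}")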
import torch import os import numpy as np import math import shutil import safetensors.torch from ldm.modules.diffusionmodules.util import timestep_embedding from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.diffusionmodules.openaimodel import UNetModel from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from data.dataset_instantiate import instantiate_from_config as instantiate_dataset_from_config from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from metrics.metrics_all import calculate_psnr_ssim, calculate_lpips, calculate_NIQE, calculate_fid_folder from torch.utils.data import DataLoader from PIL import Image from torch.optim.lr_scheduler import LambdaLR from omegaconf import OmegaConf
20413
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path) model = instantiate_from_config(config.model).cpu() print(f'Loaded model config from [{config_path}]') return model class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, **kwargs): hs = []
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path) model = instantiate_from_config(config.model).cpu() print(f'Loaded model config from [{config_path}]') return model class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, **kwargs): hs = []
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
0
2023-11-30 13:50:58+00:00
24k
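The all_code/cropped_code fields of the record above define create_model (builds a latent-diffusion model on CPU from an OmegaConf config) and load_state_dict (reads either a .ckpt or a .safetensors checkpoint and unwraps a nested 'state_dict' key). A minimal restore sketch, assuming those two helpers are in scope as defined above; the config/checkpoint paths are hypothetical and strict=False is an assumption (control branches may introduce keys absent from the base checkpoint):

# Sketch only: paths are hypothetical; create_model / load_state_dict are assumed
# to be the helpers defined in the code above, not a library API.
config_path = "configs/model.yaml"     # hypothetical config path
ckpt_path = "checkpoints/model.ckpt"   # hypothetical; a .safetensors file also works

model = create_model(config_path)                        # instantiate model on CPU from config
state_dict = load_state_dict(ckpt_path, location="cpu")  # returns a flat state dict
missing, unexpected = model.load_state_dict(state_dict, strict=False)
model = model.cuda().eval()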
IanYeung/MGLD-VSR
basicsr/data/realbasicvsr_dataset.py
[ { "identifier": "Clip", "path": "basicsr/data/mmcv_transforms/aug_pix.py", "snippet": "class Clip(BaseTransform):\n \"\"\"Clip the pixels.\n\n Modified keys are the attributes specified in \"keys\".\n\n Args:\n keys (list[str]): The keys whose values are clipped.\n a_min (int): Lower limits of pixel value.\n a_max (int): Upper limits of pixel value.\n \"\"\"\n\n def __init__(self, keys, a_min=0, a_max=255):\n\n self.keys = keys\n self.a_min = a_min\n self.a_max = a_max\n\n def _clip(self, input_):\n \"\"\"Clip the pixels.\n\n Args:\n input_ (Union[List, np.ndarray]): Pixels to clip.\n\n Returns:\n Union[List, np.ndarray]: Clipped pixels.\n \"\"\"\n is_single_image = False\n if isinstance(input_, np.ndarray):\n is_single_image = True\n input_ = [input_]\n\n # clip\n input_ = [np.clip(v, self.a_min, self.a_max) for v in input_]\n\n if is_single_image:\n input_ = input_[0]\n\n return input_\n\n def transform(self, results):\n \"\"\"transform function.\n\n Args:\n results (dict): A dict containing the necessary information and\n data for augmentation.\n\n Returns:\n dict: A dict with the values of the specified keys are rounded\n and clipped.\n \"\"\"\n\n for key in self.keys:\n results[key] = self._clip(results[key])\n\n return results\n\n def __repr__(self):\n\n result = self.__class__.__name__\n result += f'(a_min={self.a_min}, a_max={self.a_max})'\n\n return result" }, { "identifier": "UnsharpMasking", "path": "basicsr/data/mmcv_transforms/aug_pix.py", "snippet": "class UnsharpMasking(BaseTransform):\n \"\"\"Apply unsharp masking to an image or a sequence of images.\n\n Args:\n kernel_size (int): The kernel_size of the Gaussian kernel.\n sigma (float): The standard deviation of the Gaussian.\n weight (float): The weight of the \"details\" in the final output.\n threshold (float): Pixel differences larger than this value are\n regarded as \"details\".\n keys (list[str]): The keys whose values are processed.\n\n Added keys are \"xxx_unsharp\", where \"xxx\" are the attributes specified\n in \"keys\".\n \"\"\"\n\n def __init__(self, kernel_size, sigma, weight, threshold, keys):\n\n if kernel_size % 2 == 0:\n raise ValueError('kernel_size must be an odd number, but '\n f'got {kernel_size}.')\n\n self.kernel_size = kernel_size\n self.sigma = sigma\n self.weight = weight\n self.threshold = threshold\n self.keys = keys\n\n kernel = cv2.getGaussianKernel(kernel_size, sigma)\n self.kernel = np.matmul(kernel, kernel.transpose())\n\n def _unsharp_masking(self, imgs):\n \"\"\"Unsharp masking function.\"\"\"\n\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n outputs = []\n for img in imgs:\n img = img.astype(np.float32)\n residue = img - cv2.filter2D(img, -1, self.kernel)\n mask = np.float32(np.abs(residue) > self.threshold)\n soft_mask = cv2.filter2D(mask, -1, self.kernel)\n sharpened = np.clip(img + self.weight * residue, 0, 255)\n\n outputs.append(soft_mask * sharpened + (1 - soft_mask) * img)\n\n if is_single_image:\n outputs = outputs[0]\n\n return outputs\n\n def transform(self, results):\n \"\"\"transform function.\n\n Args:\n results (dict): A dict containing the necessary information and\n data for augmentation.\n\n Returns:\n dict: A dict containing the processed data and information.\n \"\"\"\n for key in self.keys:\n # results[f'{key}_unsharp'] = self._unsharp_masking(results[key])\n results[key] = self._unsharp_masking(results[key])\n\n return results\n\n def __repr__(self):\n\n repr_str = self.__class__.__name__\n repr_str += 
(f'(keys={self.keys}, kernel_size={self.kernel_size}, '\n f'sigma={self.sigma}, weight={self.weight}, '\n f'threshold={self.threshold})')\n\n return repr_str" }, { "identifier": "RescaleToZeroOne", "path": "basicsr/data/mmcv_transforms/normalization.py", "snippet": "class RescaleToZeroOne(BaseTransform):\n \"\"\"Transform the images into a range between 0 and 1.\n\n Required keys are the keys in attribute \"keys\", added or modified keys are\n the keys in attribute \"keys\".\n It also supports rescaling a list of images.\n\n Args:\n keys (Sequence[str]): The images to be transformed.\n \"\"\"\n\n def __init__(self, keys):\n self.keys = keys\n\n def transform(self, results):\n \"\"\"transform function.\n\n Args:\n results (dict): A dict containing the necessary information and\n data for augmentation.\n\n Returns:\n dict: A dict containing the processed data and information.\n \"\"\"\n for key in self.keys:\n if isinstance(results[key], list):\n results[key] = [\n v.astype(np.float32) / 255. for v in results[key]\n ]\n else:\n results[key] = results[key].astype(np.float32) / 255.\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'" }, { "identifier": "RandomBlur", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomBlur:\n \"\"\"Apply random blur to the input.\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n \"\"\"\n\n def __init__(self, params, keys):\n self.keys = keys\n self.params = params\n\n def get_kernel(self, num_kernels: int):\n \"\"\"This is the function to create kernel.\n\n Args:\n num_kernels (int): the number of kernels\n\n Returns:\n _type_: _description_\n \"\"\"\n kernel_type = np.random.choice(\n self.params['kernel_list'], p=self.params['kernel_prob'])\n kernel_size = random.choice(self.params['kernel_size'])\n\n sigma_x_range = self.params.get('sigma_x', [0, 0])\n sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])\n sigma_x_step = self.params.get('sigma_x_step', 0)\n\n sigma_y_range = self.params.get('sigma_y', [0, 0])\n sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])\n sigma_y_step = self.params.get('sigma_y_step', 0)\n\n rotate_angle_range = self.params.get('rotate_angle', [-np.pi, np.pi])\n rotate_angle = np.random.uniform(rotate_angle_range[0],\n rotate_angle_range[1])\n rotate_angle_step = self.params.get('rotate_angle_step', 0)\n\n beta_gau_range = self.params.get('beta_gaussian', [0.5, 4])\n beta_gau = np.random.uniform(beta_gau_range[0], beta_gau_range[1])\n beta_gau_step = self.params.get('beta_gaussian_step', 0)\n\n beta_pla_range = self.params.get('beta_plateau', [1, 2])\n beta_pla = np.random.uniform(beta_pla_range[0], beta_pla_range[1])\n beta_pla_step = self.params.get('beta_plateau_step', 0)\n\n omega_range = self.params.get('omega', None)\n omega_step = self.params.get('omega_step', 0)\n if omega_range is None: # follow Real-ESRGAN settings if not specified\n if kernel_size < 13:\n omega_range = [np.pi / 3., np.pi]\n else:\n omega_range = [np.pi / 5., np.pi]\n omega = np.random.uniform(omega_range[0], omega_range[1])\n\n # determine blurring kernel\n kernels = []\n for _ in range(0, num_kernels):\n kernel = random_mixed_kernels(\n [kernel_type],\n [1],\n kernel_size,\n [sigma_x, sigma_x],\n [sigma_y, sigma_y],\n [rotate_angle, rotate_angle],\n [beta_gau, beta_gau],\n [beta_pla, 
beta_pla],\n [omega, omega],\n None,\n )\n kernels.append(kernel)\n\n # update kernel parameters\n sigma_x += np.random.uniform(-sigma_x_step, sigma_x_step)\n sigma_y += np.random.uniform(-sigma_y_step, sigma_y_step)\n rotate_angle += np.random.uniform(-rotate_angle_step,\n rotate_angle_step)\n beta_gau += np.random.uniform(-beta_gau_step, beta_gau_step)\n beta_pla += np.random.uniform(-beta_pla_step, beta_pla_step)\n omega += np.random.uniform(-omega_step, omega_step)\n\n sigma_x = np.clip(sigma_x, sigma_x_range[0], sigma_x_range[1])\n sigma_y = np.clip(sigma_y, sigma_y_range[0], sigma_y_range[1])\n rotate_angle = np.clip(rotate_angle, rotate_angle_range[0],\n rotate_angle_range[1])\n beta_gau = np.clip(beta_gau, beta_gau_range[0], beta_gau_range[1])\n beta_pla = np.clip(beta_pla, beta_pla_range[0], beta_pla_range[1])\n omega = np.clip(omega, omega_range[0], omega_range[1])\n\n return kernels\n\n def _apply_random_blur(self, imgs):\n \"\"\"This is the function to apply blur operation on images.\n\n Args:\n imgs (Tensor): images\n\n Returns:\n Tensor: Images applied blur\n \"\"\"\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n # get kernel and blur the input\n kernels = self.get_kernel(num_kernels=len(imgs))\n imgs = [\n cv2.filter2D(img, -1, kernel)\n for img, kernel in zip(imgs, kernels)\n ]\n\n if is_single_image:\n imgs = imgs[0]\n\n return imgs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._apply_random_blur(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "RandomResize", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomResize:\n \"\"\"Randomly resize the input.\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n \"\"\"\n\n def __init__(self, params, keys):\n self.keys = keys\n self.params = params\n\n self.resize_dict = dict(\n bilinear=cv2.INTER_LINEAR,\n bicubic=cv2.INTER_CUBIC,\n area=cv2.INTER_AREA,\n lanczos=cv2.INTER_LANCZOS4)\n\n def _random_resize(self, imgs):\n \"\"\"This is the function used to randomly resize images for training\n augmentation.\n\n Args:\n imgs (Tensor): training images.\n\n Returns:\n Tensor: images after randomly resized\n \"\"\"\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n h, w = imgs[0].shape[:2]\n\n resize_opt = self.params['resize_opt']\n resize_prob = self.params['resize_prob']\n resize_opt = np.random.choice(resize_opt, p=resize_prob).lower()\n if resize_opt not in self.resize_dict:\n raise NotImplementedError(f'resize_opt [{resize_opt}] is not '\n 'implemented')\n resize_opt = self.resize_dict[resize_opt]\n\n resize_step = self.params.get('resize_step', 0)\n\n # determine the target size, if not provided\n target_size = self.params.get('target_size', None)\n if target_size is None:\n resize_mode = np.random.choice(['up', 'down', 'keep'],\n p=self.params['resize_mode_prob'])\n resize_scale = self.params['resize_scale']\n if resize_mode == 'up':\n scale_factor = np.random.uniform(1, resize_scale[1])\n elif resize_mode == 'down':\n scale_factor = np.random.uniform(resize_scale[0], 1)\n 
else:\n scale_factor = 1\n\n # determine output size\n h_out, w_out = h * scale_factor, w * scale_factor\n if self.params.get('is_size_even', False):\n h_out, w_out = 2 * (h_out // 2), 2 * (w_out // 2)\n target_size = (int(h_out), int(w_out))\n else:\n resize_step = 0\n\n # resize the input\n if resize_step == 0: # same target_size for all input images\n outputs = [\n cv2.resize(img, target_size[::-1], interpolation=resize_opt)\n for img in imgs\n ]\n else: # different target_size for each input image\n outputs = []\n for img in imgs:\n img = cv2.resize(\n img, target_size[::-1], interpolation=resize_opt)\n outputs.append(img)\n\n # update scale\n scale_factor += np.random.uniform(-resize_step, resize_step)\n scale_factor = np.clip(scale_factor, resize_scale[0],\n resize_scale[1])\n\n # determine output size\n h_out, w_out = h * scale_factor, w * scale_factor\n if self.params.get('is_size_even', False):\n h_out, w_out = 2 * (h_out // 2), 2 * (w_out // 2)\n target_size = (int(h_out), int(w_out))\n\n if is_single_image:\n outputs = outputs[0]\n\n return outputs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._random_resize(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "RandomNoise", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomNoise:\n \"\"\"Apply random noise to the input.\n\n Currently support Gaussian noise and Poisson noise.\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n \"\"\"\n\n def __init__(self, params, keys):\n self.keys = keys\n self.params = params\n\n def _apply_gaussian_noise(self, imgs):\n \"\"\"This is the function used to apply gaussian noise on images.\n\n Args:\n imgs (Tensor): images\n\n Returns:\n Tensor: images applied gaussian noise\n \"\"\"\n sigma_range = self.params['gaussian_sigma']\n sigma = np.random.uniform(sigma_range[0], sigma_range[1])\n\n sigma_step = self.params.get('gaussian_sigma_step', 0)\n\n gray_noise_prob = self.params['gaussian_gray_noise_prob']\n is_gray_noise = np.random.uniform() < gray_noise_prob\n\n outputs = []\n for img in imgs:\n noise = np.float32(np.random.randn(*(img.shape))) * sigma\n if is_gray_noise:\n noise = noise[:, :, :1]\n outputs.append(img + noise)\n\n # update noise level\n sigma += np.random.uniform(-sigma_step, sigma_step)\n sigma = np.clip(sigma, sigma_range[0], sigma_range[1])\n\n return outputs\n\n def _apply_poisson_noise(self, imgs):\n scale_range = self.params['poisson_scale']\n scale = np.random.uniform(scale_range[0], scale_range[1])\n\n scale_step = self.params.get('poisson_scale_step', 0)\n\n gray_noise_prob = self.params['poisson_gray_noise_prob']\n is_gray_noise = np.random.uniform() < gray_noise_prob\n\n outputs = []\n for img in imgs:\n noise = np.float32(img.copy())\n if is_gray_noise:\n noise = cv2.cvtColor(noise[..., [2, 1, 0]], cv2.COLOR_BGR2GRAY)\n noise = noise[..., np.newaxis]\n noise = np.clip((noise).round(), 0, 255)\n unique_val = 2**np.ceil(np.log2(len(np.unique(noise))))\n noise = np.random.poisson(noise * unique_val).astype(np.float32) \\\n / unique_val - noise\n\n outputs.append(img + noise * scale)\n\n # update noise level\n scale += 
np.random.uniform(-scale_step, scale_step)\n scale = np.clip(scale, scale_range[0], scale_range[1])\n\n return outputs\n\n def _apply_random_noise(self, imgs):\n \"\"\"This is the function used to apply random noise on images.\n\n Args:\n imgs (Tensor): training images\n\n Returns:\n _type_: _description_\n \"\"\"\n noise_type = np.random.choice(\n self.params['noise_type'], p=self.params['noise_prob'])\n\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n if noise_type.lower() == 'gaussian':\n imgs = self._apply_gaussian_noise(imgs)\n elif noise_type.lower() == 'poisson':\n imgs = self._apply_poisson_noise(imgs)\n else:\n raise NotImplementedError(f'\"noise_type\" [{noise_type}] is '\n 'not implemented.')\n\n if is_single_image:\n imgs = imgs[0]\n\n return imgs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._apply_random_noise(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "RandomJPEGCompression", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomJPEGCompression:\n \"\"\"Apply random JPEG compression to the input.\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n bgr2rgb (str): Whether change channel order. Default: False.\n \"\"\"\n\n def __init__(self, params, keys, color_type='color', bgr2rgb=False):\n self.keys = keys\n self.params = params\n self.color_type = color_type\n self.bgr2rgb = bgr2rgb\n\n def _apply_random_compression(self, imgs):\n is_single_image = False\n if isinstance(imgs, np.ndarray):\n is_single_image = True\n imgs = [imgs]\n\n # determine initial compression level and the step size\n quality = self.params['quality']\n quality_step = self.params.get('quality_step', 0)\n jpeg_param = round(np.random.uniform(quality[0], quality[1]))\n\n # apply jpeg compression\n outputs = []\n for img in imgs:\n encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_param]\n if self.bgr2rgb and self.color_type == 'color':\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n _, img_encoded = cv2.imencode('.jpg', img, encode_param)\n\n if self.color_type == 'color':\n img_encoded = cv2.imdecode(img_encoded, 1)\n if self.bgr2rgb:\n img_encoded = cv2.cvtColor(img_encoded, cv2.COLOR_BGR2RGB)\n outputs.append(img_encoded)\n else:\n outputs.append(cv2.imdecode(img_encoded, 0))\n\n # update compression level\n jpeg_param += np.random.uniform(-quality_step, quality_step)\n jpeg_param = round(np.clip(jpeg_param, quality[0], quality[1]))\n\n if is_single_image:\n outputs = outputs[0]\n\n return outputs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._apply_random_compression(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "RandomVideoCompression", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class RandomVideoCompression:\n \"\"\"Apply random video compression to the input.\n\n Modified keys are the attributed specified in 
\"keys\".\n\n Args:\n params (dict): A dictionary specifying the degradation settings.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n \"\"\"\n\n def __init__(self, params, keys):\n assert has_av, 'Please install av to use video compression.'\n\n self.keys = keys\n self.params = params\n logging.getLogger('libav').setLevel(50)\n\n def _apply_random_compression(self, imgs):\n \"\"\"This is the function to apply random compression on images.\n\n Args:\n imgs (Tensor): training images\n\n Returns:\n Tensor: images after randomly compressed\n \"\"\"\n codec = random.choices(self.params['codec'],\n self.params['codec_prob'])[0]\n bitrate = self.params['bitrate']\n bitrate = np.random.randint(bitrate[0], bitrate[1] + 1)\n\n buf = io.BytesIO()\n with av.open(buf, 'w', 'mp4') as container:\n stream = container.add_stream(codec, rate=1)\n stream.height = imgs[0].shape[0]\n stream.width = imgs[0].shape[1]\n stream.pix_fmt = 'yuv420p'\n stream.bit_rate = bitrate\n\n for img in imgs:\n img = img.astype(np.uint8)\n frame = av.VideoFrame.from_ndarray(img, format='rgb24')\n frame.pict_type = 'NONE'\n for packet in stream.encode(frame):\n container.mux(packet)\n\n # Flush stream\n for packet in stream.encode():\n container.mux(packet)\n\n outputs = []\n with av.open(buf, 'r', 'mp4') as container:\n if container.streams.video:\n for frame in container.decode(**{'video': 0}):\n outputs.append(frame.to_rgb().to_ndarray().astype(\n np.float32))\n\n return outputs\n\n def __call__(self, results):\n if np.random.uniform() > self.params.get('prob', 1):\n return results\n\n for key in self.keys:\n results[key] = self._apply_random_compression(results[key])\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(params={self.params}, keys={self.keys})')\n return repr_str" }, { "identifier": "DegradationsWithShuffle", "path": "basicsr/data/mmcv_transforms/random_degradations.py", "snippet": "class DegradationsWithShuffle:\n \"\"\"Apply random degradations to input, with degradations being shuffled.\n\n Degradation groups are supported. The order of degradations within the same\n group is preserved. For example, if we have degradations = [a, b, [c, d]]\n and shuffle_idx = None, then the possible orders are\n\n ::\n\n [a, b, [c, d]]\n [a, [c, d], b]\n [b, a, [c, d]]\n [b, [c, d], a]\n [[c, d], a, b]\n [[c, d], b, a]\n\n Modified keys are the attributed specified in \"keys\".\n\n Args:\n degradations (list[dict]): The list of degradations.\n keys (list[str]): A list specifying the keys whose values are\n modified.\n shuffle_idx (list | None, optional): The degradations corresponding to\n these indices are shuffled. 
If None, all degradations are shuffled.\n Default: None.\n \"\"\"\n\n def __init__(self, degradations, keys, shuffle_idx=None):\n\n self.keys = keys\n\n self.degradations = self._build_degradations(degradations)\n\n if shuffle_idx is None:\n self.shuffle_idx = list(range(0, len(degradations)))\n else:\n self.shuffle_idx = shuffle_idx\n\n def _build_degradations(self, degradations):\n for i, degradation in enumerate(degradations):\n if isinstance(degradation, (list, tuple)):\n degradations[i] = self._build_degradations(degradation)\n else:\n degradation_ = allowed_degradations[degradation['type']]\n degradations[i] = degradation_(degradation['params'],\n self.keys)\n\n return degradations\n\n def __call__(self, results):\n # shuffle degradations\n if len(self.shuffle_idx) > 0:\n shuffle_list = [self.degradations[i] for i in self.shuffle_idx]\n np.random.shuffle(shuffle_list)\n for i, idx in enumerate(self.shuffle_idx):\n self.degradations[idx] = shuffle_list[i]\n\n # apply degradations to input\n for degradation in self.degradations:\n if isinstance(degradation, (tuple, list)):\n for subdegrdation in degradation:\n results = subdegrdation(results)\n else:\n results = degradation(results)\n\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += (f'(degradations={self.degradations}, '\n f'keys={self.keys}, '\n f'shuffle_idx={self.shuffle_idx})')\n return repr_str" }, { "identifier": "circular_lowpass_kernel", "path": "basicsr/data/degradations.py", "snippet": "def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):\n \"\"\"2D sinc filter\n\n Reference: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter\n\n Args:\n cutoff (float): cutoff frequency in radians (pi is max)\n kernel_size (int): horizontal and vertical size, must be odd.\n pad_to (int): pad kernel size to desired size, must be odd or zero.\n \"\"\"\n assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'\n kernel = np.fromfunction(\n lambda x, y: cutoff * special.j1(cutoff * np.sqrt(\n (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)) / (2 * np.pi * np.sqrt(\n (x - (kernel_size - 1) / 2)**2 + (y - (kernel_size - 1) / 2)**2)), [kernel_size, kernel_size])\n kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)\n kernel = kernel / np.sum(kernel)\n if pad_to > kernel_size:\n pad_size = (pad_to - kernel_size) // 2\n kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))\n return kernel" }, { "identifier": "random_mixed_kernels", "path": "basicsr/data/degradations.py", "snippet": "def random_mixed_kernels(kernel_list,\n kernel_prob,\n kernel_size=21,\n sigma_x_range=(0.6, 5),\n sigma_y_range=(0.6, 5),\n rotation_range=(-math.pi, math.pi),\n betag_range=(0.5, 8),\n betap_range=(0.5, 8),\n noise_range=None,\n return_sigma=False):\n \"\"\"Randomly generate mixed kernels.\n\n Args:\n kernel_list (tuple): a list name of kernel types,\n support ['iso', 'aniso', 'skew', 'generalized', 'plateau_iso',\n 'plateau_aniso']\n kernel_prob (tuple): corresponding kernel probability for each\n kernel type\n kernel_size (int):\n sigma_x_range (tuple): [0.6, 5]\n sigma_y_range (tuple): [0.6, 5]\n rotation range (tuple): [-math.pi, math.pi]\n beta_range (tuple): [0.5, 8]\n noise_range(tuple, optional): multiplicative kernel noise,\n [0.75, 1.25]. 
Default: None\n\n Returns:\n kernel (ndarray):\n \"\"\"\n kernel_type = random.choices(kernel_list, kernel_prob)[0]\n if not return_sigma:\n if kernel_type == 'iso':\n kernel = random_bivariate_Gaussian(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True, return_sigma=return_sigma)\n elif kernel_type == 'aniso':\n kernel = random_bivariate_Gaussian(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False, return_sigma=return_sigma)\n elif kernel_type == 'generalized_iso':\n kernel = random_bivariate_generalized_Gaussian(\n kernel_size,\n sigma_x_range,\n sigma_y_range,\n rotation_range,\n betag_range,\n noise_range=noise_range,\n isotropic=True,\n return_sigma=return_sigma)\n elif kernel_type == 'generalized_aniso':\n kernel = random_bivariate_generalized_Gaussian(\n kernel_size,\n sigma_x_range,\n sigma_y_range,\n rotation_range,\n betag_range,\n noise_range=noise_range,\n isotropic=False,\n return_sigma=return_sigma)\n elif kernel_type == 'plateau_iso':\n kernel = random_bivariate_plateau(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True, return_sigma=return_sigma)\n elif kernel_type == 'plateau_aniso':\n kernel = random_bivariate_plateau(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False, return_sigma=return_sigma)\n return kernel\n else:\n if kernel_type == 'iso':\n kernel, sigma_list = random_bivariate_Gaussian(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True, return_sigma=return_sigma)\n elif kernel_type == 'aniso':\n kernel, sigma_list = random_bivariate_Gaussian(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False, return_sigma=return_sigma)\n elif kernel_type == 'generalized_iso':\n kernel, sigma_list = random_bivariate_generalized_Gaussian(\n kernel_size,\n sigma_x_range,\n sigma_y_range,\n rotation_range,\n betag_range,\n noise_range=noise_range,\n isotropic=True,\n return_sigma=return_sigma)\n elif kernel_type == 'generalized_aniso':\n kernel, sigma_list = random_bivariate_generalized_Gaussian(\n kernel_size,\n sigma_x_range,\n sigma_y_range,\n rotation_range,\n betag_range,\n noise_range=noise_range,\n isotropic=False,\n return_sigma=return_sigma)\n elif kernel_type == 'plateau_iso':\n kernel, sigma_list = random_bivariate_plateau(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True, return_sigma=return_sigma)\n elif kernel_type == 'plateau_aniso':\n kernel, sigma_list = random_bivariate_plateau(\n kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False, return_sigma=return_sigma)\n return kernel, sigma_list" }, { "identifier": "augment", "path": "basicsr/data/transforms.py", "snippet": "def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):\n \"\"\"Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).\n\n We use vertical flip and transpose for rotation implementation.\n All the images in the list use the same augmentation.\n\n Args:\n imgs (list[ndarray] | ndarray): Images to be augmented. If the input\n is an ndarray, it will be transformed to a list.\n hflip (bool): Horizontal flip. Default: True.\n rotation (bool): Ratotation. Default: True.\n flows (list[ndarray]: Flows to be augmented. 
If the input is an\n ndarray, it will be transformed to a list.\n Dimension is (h, w, 2). Default: None.\n return_status (bool): Return the status of flip and rotation.\n Default: False.\n\n Returns:\n list[ndarray] | ndarray: Augmented images and flows. If returned\n results only have one element, just return ndarray.\n\n \"\"\"\n hflip = hflip and random.random() < 0.5\n vflip = rotation and random.random() < 0.5\n rot90 = rotation and random.random() < 0.5\n\n def _augment(img):\n if hflip: # horizontal\n cv2.flip(img, 1, img)\n if vflip: # vertical\n cv2.flip(img, 0, img)\n if rot90:\n img = img.transpose(1, 0, 2)\n return img\n\n def _augment_flow(flow):\n if hflip: # horizontal\n cv2.flip(flow, 1, flow)\n flow[:, :, 0] *= -1\n if vflip: # vertical\n cv2.flip(flow, 0, flow)\n flow[:, :, 1] *= -1\n if rot90:\n flow = flow.transpose(1, 0, 2)\n flow = flow[:, :, [1, 0]]\n return flow\n\n if not isinstance(imgs, list):\n imgs = [imgs]\n imgs = [_augment(img) for img in imgs]\n if len(imgs) == 1:\n imgs = imgs[0]\n\n if flows is not None:\n if not isinstance(flows, list):\n flows = [flows]\n flows = [_augment_flow(flow) for flow in flows]\n if len(flows) == 1:\n flows = flows[0]\n return imgs, flows\n else:\n if return_status:\n return imgs, (hflip, vflip, rot90)\n else:\n return imgs" }, { "identifier": "single_random_crop", "path": "basicsr/data/transforms.py", "snippet": "def single_random_crop(img_gts, gt_patch_size, gt_path=None):\n \"\"\"Random crop. Support Numpy array and Tensor inputs.\n\n It crops lists of lq and gt images with corresponding locations.\n\n Args:\n img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n gt_patch_size (int): GT patch size.\n gt_path (str): Path to ground-truth. Default: None.\n\n Returns:\n list[ndarray] | ndarray: GT images and LQ images. If returned results\n only have one element, just return ndarray.\n \"\"\"\n\n if not isinstance(img_gts, list):\n img_gts = [img_gts]\n\n # determine input type: Numpy array or Tensor\n input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'\n\n if input_type == 'Tensor':\n h_gt, w_gt = img_gts[0].size()[-2:]\n else:\n h_gt, w_gt = img_gts[0].shape[0:2]\n\n # if h_gt != h_lq * scale or w_gt != w_lq * scale:\n # raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',\n # f'multiplication of LQ ({h_lq}, {w_lq}).')\n if h_gt < gt_patch_size or w_gt < gt_patch_size:\n raise ValueError(f'GT ({h_gt}, {w_gt}) is smaller than '\n f'patch size ({gt_patch_size}, {gt_patch_size}).')\n\n # randomly choose top and left coordinates for lq patch\n top = random.randint(0, h_gt - gt_patch_size)\n left = random.randint(0, w_gt - gt_patch_size)\n\n # crop corresponding gt patch\n if input_type == 'Tensor':\n img_gts = [v[:, :, top:top + gt_patch_size, left:left + gt_patch_size] for v in img_gts]\n else:\n img_gts = [v[top:top + gt_patch_size, left:left + gt_patch_size, ...] for v in img_gts]\n if len(img_gts) == 1:\n img_gts = img_gts[0]\n\n return img_gts" }, { "identifier": "paired_random_crop", "path": "basicsr/data/transforms.py", "snippet": "def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None):\n \"\"\"Paired random crop. Support Numpy array and Tensor inputs.\n\n It crops lists of lq and gt images with corresponding locations.\n\n Args:\n img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. 
Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n img_lqs (list[ndarray] | ndarray): LQ images. Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n gt_patch_size (int): GT patch size.\n scale (int): Scale factor.\n gt_path (str): Path to ground-truth. Default: None.\n\n Returns:\n list[ndarray] | ndarray: GT images and LQ images. If returned results\n only have one element, just return ndarray.\n \"\"\"\n\n if not isinstance(img_gts, list):\n img_gts = [img_gts]\n if not isinstance(img_lqs, list):\n img_lqs = [img_lqs]\n\n # determine input type: Numpy array or Tensor\n input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'\n\n if input_type == 'Tensor':\n h_lq, w_lq = img_lqs[0].size()[-2:]\n h_gt, w_gt = img_gts[0].size()[-2:]\n else:\n h_lq, w_lq = img_lqs[0].shape[0:2]\n h_gt, w_gt = img_gts[0].shape[0:2]\n lq_patch_size = gt_patch_size // scale\n\n if h_gt != h_lq * scale or w_gt != w_lq * scale:\n raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',\n f'multiplication of LQ ({h_lq}, {w_lq}).')\n if h_lq < lq_patch_size or w_lq < lq_patch_size:\n raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '\n f'({lq_patch_size}, {lq_patch_size}). '\n f'Please remove {gt_path}.')\n\n # randomly choose top and left coordinates for lq patch\n top = random.randint(0, h_lq - lq_patch_size)\n left = random.randint(0, w_lq - lq_patch_size)\n\n # crop lq patch\n if input_type == 'Tensor':\n img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs]\n else:\n img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]\n\n # crop corresponding gt patch\n top_gt, left_gt = int(top * scale), int(left * scale)\n if input_type == 'Tensor':\n img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts]\n else:\n img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]\n if len(img_gts) == 1:\n img_gts = img_gts[0]\n if len(img_lqs) == 1:\n img_lqs = img_lqs[0]\n return img_gts, img_lqs" }, { "identifier": "FileClient", "path": "basicsr/utils/file_client.py", "snippet": "class FileClient(object):\n \"\"\"A general file client to access files in different backend.\n\n The client loads a file or text in a specified backend from its path\n and return it as a binary file. it can also register other backend\n accessor with a given name and backend class.\n\n Attributes:\n backend (str): The storage backend type. Options are \"disk\",\n \"memcached\" and \"lmdb\".\n client (:obj:`BaseStorageBackend`): The backend object.\n \"\"\"\n\n _backends = {\n 'disk': HardDiskBackend,\n 'memcached': MemcachedBackend,\n 'lmdb': LmdbBackend,\n }\n\n def __init__(self, backend='disk', **kwargs):\n if backend not in self._backends:\n raise ValueError(f'Backend {backend} is not supported. 
Currently supported ones'\n f' are {list(self._backends.keys())}')\n self.backend = backend\n self.client = self._backends[backend](**kwargs)\n\n def get(self, filepath, client_key='default'):\n # client_key is used only for lmdb, where different fileclients have\n # different lmdb environments.\n if self.backend == 'lmdb':\n return self.client.get(filepath, client_key)\n else:\n return self.client.get(filepath)\n\n def get_text(self, filepath):\n return self.client.get_text(filepath)" }, { "identifier": "imfrombytes", "path": "basicsr/utils/img_util.py", "snippet": "def imfrombytes(content, flag='color', float32=False):\n \"\"\"Read an image from bytes.\n\n Args:\n content (bytes): Image bytes got from files or other streams.\n flag (str): Flags specifying the color type of a loaded image,\n candidates are `color`, `grayscale` and `unchanged`.\n float32 (bool): Whether to change to float32., If True, will also norm\n to [0, 1]. Default: False.\n\n Returns:\n ndarray: Loaded image array.\n \"\"\"\n img_np = np.frombuffer(content, np.uint8)\n imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED}\n img = cv2.imdecode(img_np, imread_flags[flag])\n if float32:\n img = img.astype(np.float32) / 255.\n return img" }, { "identifier": "img2tensor", "path": "basicsr/utils/img_util.py", "snippet": "def img2tensor(imgs, bgr2rgb=True, float32=True):\n \"\"\"Numpy array to tensor.\n\n Args:\n imgs (list[ndarray] | ndarray): Input images.\n bgr2rgb (bool): Whether to change bgr to rgb.\n float32 (bool): Whether to change to float32.\n\n Returns:\n list[tensor] | tensor: Tensor images. If returned results only have\n one element, just return tensor.\n \"\"\"\n\n def _totensor(img, bgr2rgb, float32):\n if img.shape[2] == 3 and bgr2rgb:\n if img.dtype == 'float64':\n img = img.astype('float32')\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = torch.from_numpy(img.transpose(2, 0, 1))\n if float32:\n img = img.float()\n return img\n\n if isinstance(imgs, list):\n return [_totensor(img, bgr2rgb, float32) for img in imgs]\n else:\n return _totensor(imgs, bgr2rgb, float32)" }, { "identifier": "imwrite", "path": "basicsr/utils/img_util.py", "snippet": "def imwrite(img, file_path, params=None, auto_mkdir=True):\n \"\"\"Write image to file.\n\n Args:\n img (ndarray): Image array to be written.\n file_path (str): Image file path.\n params (None or list): Same as opencv's :func:`imwrite` interface.\n auto_mkdir (bool): If the parent folder of `file_path` does not exist,\n whether to create it automatically.\n\n Returns:\n bool: Successful or not.\n \"\"\"\n if auto_mkdir:\n dir_name = os.path.abspath(os.path.dirname(file_path))\n os.makedirs(dir_name, exist_ok=True)\n ok = cv2.imwrite(file_path, img, params)\n if not ok:\n raise IOError('Failed in writing images.')" }, { "identifier": "tensor2img", "path": "basicsr/utils/img_util.py", "snippet": "def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):\n \"\"\"Convert torch Tensors into image numpy arrays.\n\n After clamping to [min, max], values will be normalized to [0, 1].\n\n Args:\n tensor (Tensor or list[Tensor]): Accept shapes:\n 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);\n 2) 3D Tensor of shape (3/1 x H x W);\n 3) 2D Tensor of shape (H x W).\n Tensor channel should be in RGB order.\n rgb2bgr (bool): Whether to change rgb to bgr.\n out_type (numpy type): output types. 
If ``np.uint8``, transform outputs\n to uint8 type with range [0, 255]; otherwise, float type with\n range [0, 1]. Default: ``np.uint8``.\n min_max (tuple[int]): min and max values for clamp.\n\n Returns:\n (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of\n shape (H x W). The channel order is BGR.\n \"\"\"\n if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):\n raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')\n\n if torch.is_tensor(tensor):\n tensor = [tensor]\n result = []\n for _tensor in tensor:\n _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)\n _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])\n\n n_dim = _tensor.dim()\n if n_dim == 4:\n img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()\n img_np = img_np.transpose(1, 2, 0)\n if rgb2bgr:\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\n elif n_dim == 3:\n img_np = _tensor.numpy()\n img_np = img_np.transpose(1, 2, 0)\n if img_np.shape[2] == 1: # gray image\n img_np = np.squeeze(img_np, axis=2)\n else:\n if rgb2bgr:\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\n elif n_dim == 2:\n img_np = _tensor.numpy()\n else:\n raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}')\n if out_type == np.uint8:\n # Unlike MATLAB, numpy.unit8() WILL NOT round by default.\n img_np = (img_np * 255.0).round()\n img_np = img_np.astype(out_type)\n result.append(img_np)\n # if len(result) == 1 and torch.is_tensor(tensor):\n if len(result) == 1:\n result = result[0]\n return result" }, { "identifier": "get_root_logger", "path": "basicsr/utils/logger.py", "snippet": "def get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=None):\n \"\"\"Get the root logger.\n\n The logger will be initialized if it has not been initialized. By default a\n StreamHandler will be added. If `log_file` is specified, a FileHandler will\n also be added.\n\n Args:\n logger_name (str): root logger name. Default: 'basicsr'.\n log_file (str | None): The log filename. If specified, a FileHandler\n will be added to the root logger.\n log_level (int): The root logger level. 
Note that only the process of\n rank 0 is affected, while other processes will set the level to\n \"Error\" and be silent most of the time.\n\n Returns:\n logging.Logger: The root logger.\n \"\"\"\n logger = logging.getLogger(logger_name)\n # if the logger has been initialized, just return it\n if logger_name in initialized_logger:\n return logger\n\n format_str = '%(asctime)s %(levelname)s: %(message)s'\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(format_str))\n logger.addHandler(stream_handler)\n logger.propagate = False\n rank, _ = get_dist_info()\n if rank != 0:\n logger.setLevel('ERROR')\n elif log_file is not None:\n logger.setLevel(log_level)\n # add file handler\n file_handler = logging.FileHandler(log_file, 'w')\n file_handler.setFormatter(logging.Formatter(format_str))\n file_handler.setLevel(log_level)\n logger.addHandler(file_handler)\n initialized_logger[logger_name] = True\n return logger" }, { "identifier": "dequantize_flow", "path": "basicsr/utils/flow_util.py", "snippet": "def dequantize_flow(dx, dy, max_val=0.02, denorm=True):\n \"\"\"Recover from quantized flow.\n\n Args:\n dx (ndarray): Quantized dx.\n dy (ndarray): Quantized dy.\n max_val (float): Maximum value used when quantizing.\n denorm (bool): Whether to multiply flow values with width/height.\n\n Returns:\n ndarray: Dequantized flow.\n \"\"\"\n assert dx.shape == dy.shape\n assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)\n\n dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]\n\n if denorm:\n dx *= dx.shape[1]\n dy *= dx.shape[0]\n flow = np.dstack((dx, dy))\n return flow" }, { "identifier": "DATASET_REGISTRY", "path": "basicsr/utils/registry.py", "snippet": "DATASET_REGISTRY = Registry('dataset')" } ]
import cv2 import math import time import os import os.path as osp import numpy as np import random import torch from copy import deepcopy from pathlib import Path from torch.utils import data as data from basicsr.data.mmcv_transforms import Clip, UnsharpMasking, RescaleToZeroOne from basicsr.data.mmcv_transforms import RandomBlur, RandomResize, RandomNoise, RandomJPEGCompression, RandomVideoCompression, DegradationsWithShuffle from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels from basicsr.data.transforms import augment, single_random_crop, paired_random_crop from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor, tensor2img, imwrite from basicsr.utils.flow_util import dequantize_flow from basicsr.utils.registry import DATASET_REGISTRY
15,788
folder, frame_num, _ = line.split(' ') self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))]) # remove the video clips used in validation if opt['val_partition'] == 'REDS4': val_partition = ['000', '011', '015', '020'] elif opt['val_partition'] == 'official': val_partition = [f'{v:03d}' for v in range(240, 270)] else: raise ValueError(f'Wrong validation partition {opt["val_partition"]}.' f"Supported ones are ['official', 'REDS4'].") if opt['test_mode']: self.keys = [v for v in self.keys if v.split('/')[0] in val_partition] else: self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition] # file client (io backend) self.file_client = None self.io_backend_opt = opt['io_backend'] self.is_lmdb = False if self.io_backend_opt['type'] == 'lmdb': self.is_lmdb = True self.io_backend_opt['db_paths'] = [self.gt_root] self.io_backend_opt['client_keys'] = ['gt'] # temporal augmentation configs self.interval_list = opt.get('interval_list', [1]) self.random_reverse = opt.get('random_reverse', False) interval_str = ','.join(str(x) for x in self.interval_list) logger = get_root_logger() logger.info(f'Temporal augmentation interval list: [{interval_str}]; ' f'random reverse is {self.random_reverse}.') # blur settings for the first degradation self.blur_kernel_size = opt['blur_kernel_size'] self.kernel_list = opt['kernel_list'] self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability self.blur_sigma = opt['blur_sigma'] self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels self.betap_range = opt['betap_range'] # betap used in plateau blur kernels self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters # blur settings for the second degradation self.blur_kernel_size2 = opt['blur_kernel_size2'] self.kernel_list2 = opt['kernel_list2'] self.kernel_prob2 = opt['kernel_prob2'] self.blur_sigma2 = opt['blur_sigma2'] self.betag_range2 = opt['betag_range2'] self.betap_range2 = opt['betap_range2'] self.sinc_prob2 = opt['sinc_prob2'] # a final sinc filter self.final_sinc_prob = opt['final_sinc_prob'] self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21 # TODO: kernel range is now hard-coded, should be in the configure file self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect self.pulse_tensor[10, 10] = 1 def __getitem__(self, index): if self.file_client is None: self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) gt_size = self.opt['gt_size'] key = self.keys[index] clip_name, frame_name = key.split('/') # key example: 000/00000000 # determine the neighboring frames interval = random.choice(self.interval_list) # ensure not exceeding the borders start_frame_idx = int(frame_name) if start_frame_idx > 100 - self.num_frame * interval: start_frame_idx = random.randint(0, 100 - self.num_frame * interval) end_frame_idx = start_frame_idx + self.num_frame * interval neighbor_list = list(range(start_frame_idx, end_frame_idx, interval)) # random reverse if self.random_reverse and random.random() < 0.5: neighbor_list.reverse() # get the GT frames img_gts = [] for neighbor in neighbor_list: if self.is_lmdb: img_gt_path = f'{clip_name}/{neighbor:08d}' else: img_gt_path = self.gt_root / clip_name / f'{neighbor:08d}.png' # get GT img_bytes = self.file_client.get(img_gt_path, 'gt') img_gt = imfrombytes(img_bytes, float32=True) img_gts.append(img_gt) # randomly crop img_gts = single_random_crop(img_gts, gt_size, img_gt_path) # 
augmentation - flip, rotate img_gts = augment(img_gts, self.opt['use_hflip'], self.opt['use_rot']) # list-to-list img_gts = img2tensor(img_gts) # kernels kernel1s = [] kernel2s = [] sinc_kernels = [] for _ in range(len(img_gts)): # ------------------------ Generate kernels (used in the first degradation) ------------------------ # kernel_size = random.choice(self.kernel_range) if np.random.uniform() < self.opt['sinc_prob']: # this sinc filter setting is for kernels ranging from [7, 21] if kernel_size < 13: omega_c = np.random.uniform(np.pi / 3, np.pi) else: omega_c = np.random.uniform(np.pi / 5, np.pi) kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) else:
# @DATASET_REGISTRY.register() class RealVSRRecurrentDataset(data.Dataset): """REDS dataset for training recurrent networks. The keys are generated from a meta info txt file. basicsr/data/meta_info/meta_info_REDS_GT.txt Each line contains: 1. subfolder (clip) name; 2. frame number; 3. image shape, separated by a white space. Examples: 000 100 (720,1280,3) 001 100 (720,1280,3) ... Key examples: "000/00000000" GT (gt): Ground-Truth; LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames. Args: opt (dict): Config for train dataset. It contains the following keys: dataroot_gt (str): Data root path for gt. meta_info_file (str): Path for meta information file. val_partition (str): Validation partition types. 'REDS4' or 'official'. io_backend (dict): IO backend type and other kwarg. num_frame (int): Window size for input frames. gt_size (int): Cropped patched size for gt patches. interval_list (list): Interval list for temporal augmentation. random_reverse (bool): Random reverse input frames. use_hflip (bool): Use horizontal flips. use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation). """ def __init__(self, opt): super(RealVSRRecurrentDataset, self).__init__() self.opt = opt self.gt_root = Path(opt['dataroot_gt']) self.num_frame = opt['num_frame'] self.keys = [] with open(opt['meta_info_file'], 'r') as fin: for line in fin: folder, frame_num, _ = line.split(' ') self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))]) # remove the video clips used in validation if opt['val_partition'] == 'REDS4': val_partition = ['000', '011', '015', '020'] elif opt['val_partition'] == 'official': val_partition = [f'{v:03d}' for v in range(240, 270)] else: raise ValueError(f'Wrong validation partition {opt["val_partition"]}.' 
f"Supported ones are ['official', 'REDS4'].") if opt['test_mode']: self.keys = [v for v in self.keys if v.split('/')[0] in val_partition] else: self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition] # file client (io backend) self.file_client = None self.io_backend_opt = opt['io_backend'] self.is_lmdb = False if self.io_backend_opt['type'] == 'lmdb': self.is_lmdb = True self.io_backend_opt['db_paths'] = [self.gt_root] self.io_backend_opt['client_keys'] = ['gt'] # temporal augmentation configs self.interval_list = opt.get('interval_list', [1]) self.random_reverse = opt.get('random_reverse', False) interval_str = ','.join(str(x) for x in self.interval_list) logger = get_root_logger() logger.info(f'Temporal augmentation interval list: [{interval_str}]; ' f'random reverse is {self.random_reverse}.') # the first degradation self.random_blur_1 = RandomBlur( params=opt['degradation_1']['random_blur']['params'], keys=opt['degradation_1']['random_blur']['keys'] ) self.random_resize_1 = RandomResize( params=opt['degradation_1']['random_resize']['params'], keys=opt['degradation_1']['random_resize']['keys'] ) self.random_noise_1 = RandomNoise( params=opt['degradation_1']['random_noise']['params'], keys=opt['degradation_1']['random_noise']['keys'] ) self.random_jpeg_1 = RandomJPEGCompression( params=opt['degradation_1']['random_jpeg']['params'], keys=opt['degradation_1']['random_jpeg']['keys'] ) self.random_mpeg_1 = RandomVideoCompression( params=opt['degradation_1']['random_mpeg']['params'], keys=opt['degradation_1']['random_mpeg']['keys'] ) # the second degradation self.random_blur_2 = RandomBlur( params=opt['degradation_2']['random_blur']['params'], keys=opt['degradation_2']['random_blur']['keys'] ) self.random_resize_2 = RandomResize( params=opt['degradation_2']['random_resize']['params'], keys=opt['degradation_2']['random_resize']['keys'] ) self.random_noise_2 = RandomNoise( params=opt['degradation_2']['random_noise']['params'], keys=opt['degradation_2']['random_noise']['keys'] ) self.random_jpeg_2 = RandomJPEGCompression( params=opt['degradation_2']['random_jpeg']['params'], keys=opt['degradation_2']['random_jpeg']['keys'] ) self.random_mpeg_2 = RandomVideoCompression( params=opt['degradation_2']['random_mpeg']['params'], keys=opt['degradation_2']['random_mpeg']['keys'] ) # final self.resize_final = RandomResize( params=opt['degradation_2']['resize_final']['params'], keys=opt['degradation_2']['resize_final']['keys'] ) self.blur_final = RandomBlur( params=opt['degradation_2']['blur_final']['params'], keys=opt['degradation_2']['blur_final']['keys'] ) # transforms self.usm = UnsharpMasking( kernel_size=opt['transforms']['usm']['kernel_size'], sigma=opt['transforms']['usm']['sigma'], weight=opt['transforms']['usm']['weight'], threshold=opt['transforms']['usm']['threshold'], keys=opt['transforms']['usm']['keys'] ) self.clip = Clip(keys=opt['transforms']['clip']['keys']) self.rescale = RescaleToZeroOne(keys=opt['transforms']['rescale']['keys']) def __getitem__(self, index): if self.file_client is None: self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) gt_size = self.opt['gt_size'] key = self.keys[index] clip_name, frame_name = key.split('/') # key example: 000/00000000 # determine the neighboring frames interval = random.choice(self.interval_list) # ensure not exceeding the borders start_frame_idx = int(frame_name) if start_frame_idx > 100 - self.num_frame * interval: start_frame_idx = random.randint(0, 100 - self.num_frame * interval) 
end_frame_idx = start_frame_idx + self.num_frame * interval neighbor_list = list(range(start_frame_idx, end_frame_idx, interval)) # random reverse if self.random_reverse and random.random() < 0.5: neighbor_list.reverse() # get the GT frames img_gts = [] for neighbor in neighbor_list: if self.is_lmdb: img_gt_path = f'{clip_name}/{neighbor:08d}' else: img_gt_path = self.gt_root / clip_name / f'{neighbor:08d}.png' # get GT img_bytes = self.file_client.get(img_gt_path, 'gt') img_gt = imfrombytes(img_bytes, float32=False) img_gts.append(img_gt) # randomly crop img_gts = single_random_crop(img_gts, gt_size, img_gt_path) # augmentation - flip, rotate img_gts = augment(img_gts, self.opt['use_hflip'], self.opt['use_rot']) img_lqs = deepcopy(img_gts) out_dict = {'lqs': img_lqs, 'gts': img_gts} out_dict = self.usm.transform(out_dict) ## the first degradation out_dict = self.random_blur_1(out_dict) out_dict = self.random_resize_1(out_dict) out_dict = self.random_noise_1(out_dict) out_dict = self.random_jpeg_1(out_dict) out_dict = self.random_mpeg_1(out_dict) ## the second degradation out_dict = self.random_blur_2(out_dict) out_dict = self.random_resize_2(out_dict) out_dict = self.random_noise_2(out_dict) out_dict = self.random_jpeg_2(out_dict) out_dict = self.random_mpeg_2(out_dict) ## final resize out_dict = self.resize_final(out_dict) out_dict = self.blur_final(out_dict) # post process out_dict = self.clip(out_dict) out_dict = self.rescale.transform(out_dict) # list-to-list for k in out_dict.keys(): out_dict[k] = img2tensor(out_dict[k]) # img_gts: (t, c, h, w) # key: str return out_dict def __len__(self): return len(self.keys) # @DATASET_REGISTRY.register() class RealESRGANRecurrentDataset(data.Dataset): """REDS dataset for training recurrent networks. The keys are generated from a meta info txt file. basicsr/data/meta_info/meta_info_REDS_GT.txt Each line contains: 1. subfolder (clip) name; 2. frame number; 3. image shape, separated by a white space. Examples: 000 100 (720,1280,3) 001 100 (720,1280,3) ... Key examples: "000/00000000" GT (gt): Ground-Truth; LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames. Args: opt (dict): Config for train dataset. It contains the following keys: dataroot_gt (str): Data root path for gt. meta_info_file (str): Path for meta information file. val_partition (str): Validation partition types. 'REDS4' or 'official'. io_backend (dict): IO backend type and other kwarg. num_frame (int): Window size for input frames. gt_size (int): Cropped patched size for gt patches. interval_list (list): Interval list for temporal augmentation. random_reverse (bool): Random reverse input frames. use_hflip (bool): Use horizontal flips. use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation). """ def __init__(self, opt): super(RealESRGANRecurrentDataset, self).__init__() self.opt = opt self.gt_root = Path(opt['dataroot_gt']) self.num_frame = opt['num_frame'] self.keys = [] with open(opt['meta_info_file'], 'r') as fin: for line in fin: folder, frame_num, _ = line.split(' ') self.keys.extend([f'{folder}/{i:08d}' for i in range(int(frame_num))]) # remove the video clips used in validation if opt['val_partition'] == 'REDS4': val_partition = ['000', '011', '015', '020'] elif opt['val_partition'] == 'official': val_partition = [f'{v:03d}' for v in range(240, 270)] else: raise ValueError(f'Wrong validation partition {opt["val_partition"]}.' 
f"Supported ones are ['official', 'REDS4'].") if opt['test_mode']: self.keys = [v for v in self.keys if v.split('/')[0] in val_partition] else: self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition] # file client (io backend) self.file_client = None self.io_backend_opt = opt['io_backend'] self.is_lmdb = False if self.io_backend_opt['type'] == 'lmdb': self.is_lmdb = True self.io_backend_opt['db_paths'] = [self.gt_root] self.io_backend_opt['client_keys'] = ['gt'] # temporal augmentation configs self.interval_list = opt.get('interval_list', [1]) self.random_reverse = opt.get('random_reverse', False) interval_str = ','.join(str(x) for x in self.interval_list) logger = get_root_logger() logger.info(f'Temporal augmentation interval list: [{interval_str}]; ' f'random reverse is {self.random_reverse}.') # blur settings for the first degradation self.blur_kernel_size = opt['blur_kernel_size'] self.kernel_list = opt['kernel_list'] self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability self.blur_sigma = opt['blur_sigma'] self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels self.betap_range = opt['betap_range'] # betap used in plateau blur kernels self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters # blur settings for the second degradation self.blur_kernel_size2 = opt['blur_kernel_size2'] self.kernel_list2 = opt['kernel_list2'] self.kernel_prob2 = opt['kernel_prob2'] self.blur_sigma2 = opt['blur_sigma2'] self.betag_range2 = opt['betag_range2'] self.betap_range2 = opt['betap_range2'] self.sinc_prob2 = opt['sinc_prob2'] # a final sinc filter self.final_sinc_prob = opt['final_sinc_prob'] self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21 # TODO: kernel range is now hard-coded, should be in the configure file self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect self.pulse_tensor[10, 10] = 1 def __getitem__(self, index): if self.file_client is None: self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) gt_size = self.opt['gt_size'] key = self.keys[index] clip_name, frame_name = key.split('/') # key example: 000/00000000 # determine the neighboring frames interval = random.choice(self.interval_list) # ensure not exceeding the borders start_frame_idx = int(frame_name) if start_frame_idx > 100 - self.num_frame * interval: start_frame_idx = random.randint(0, 100 - self.num_frame * interval) end_frame_idx = start_frame_idx + self.num_frame * interval neighbor_list = list(range(start_frame_idx, end_frame_idx, interval)) # random reverse if self.random_reverse and random.random() < 0.5: neighbor_list.reverse() # get the GT frames img_gts = [] for neighbor in neighbor_list: if self.is_lmdb: img_gt_path = f'{clip_name}/{neighbor:08d}' else: img_gt_path = self.gt_root / clip_name / f'{neighbor:08d}.png' # get GT img_bytes = self.file_client.get(img_gt_path, 'gt') img_gt = imfrombytes(img_bytes, float32=True) img_gts.append(img_gt) # randomly crop img_gts = single_random_crop(img_gts, gt_size, img_gt_path) # augmentation - flip, rotate img_gts = augment(img_gts, self.opt['use_hflip'], self.opt['use_rot']) # list-to-list img_gts = img2tensor(img_gts) # kernels kernel1s = [] kernel2s = [] sinc_kernels = [] for _ in range(len(img_gts)): # ------------------------ Generate kernels (used in the first degradation) ------------------------ # kernel_size = random.choice(self.kernel_range) if np.random.uniform() < 
self.opt['sinc_prob']: # this sinc filter setting is for kernels ranging from [7, 21] if kernel_size < 13: omega_c = np.random.uniform(np.pi / 3, np.pi) else: omega_c = np.random.uniform(np.pi / 5, np.pi) kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False) else:
kernel = random_mixed_kernels(
10
2023-11-30 01:50:29+00:00
24k
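Note: the record above lists, in its context field, the basicsr helpers (single_random_crop, augment, img2tensor) that its __getitem__ composes just before the gold completion `kernel = random_mixed_kernels(`. The following is a minimal, self-contained sketch of that crop/flip/to-tensor pipeline, re-implemented inline on random arrays rather than calling basicsr itself; the 128x160 stand-in frames, gt_size=64, and the `_aug` helper are illustrative assumptions, not part of the record.

import random
import numpy as np
import torch

# Three decoded GT frames as stand-ins, (H, W, C) float32 in [0, 1].
img_gts = [np.random.rand(128, 160, 3).astype(np.float32) for _ in range(3)]

# single_random_crop: one shared gt_size x gt_size window for every frame.
gt_size = 64
h, w = img_gts[0].shape[:2]
top = random.randint(0, h - gt_size)
left = random.randint(0, w - gt_size)
img_gts = [v[top:top + gt_size, left:left + gt_size, ...] for v in img_gts]

# augment: a single hflip / vflip / rot90 decision applied to all frames.
hflip, vflip, rot90 = (random.random() < 0.5 for _ in range(3))

def _aug(img):
    if hflip:
        img = img[:, ::-1, :]   # horizontal flip
    if vflip:
        img = img[::-1, :, :]   # vertical flip
    if rot90:
        img = img.transpose(1, 0, 2)  # swap H and W
    return img

img_gts = [_aug(v) for v in img_gts]

# img2tensor: HWC numpy -> CHW float tensors (BGR->RGB skipped for random data).
img_gts = [torch.from_numpy(np.ascontiguousarray(v.transpose(2, 0, 1))).float()
           for v in img_gts]
print([tuple(t.shape) for t in img_gts])  # three (3, 64, 64) tensors

After this point the record's __getitem__ moves on to per-frame blur-kernel synthesis, whose first statement is exactly the next_line stored above.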
Czm369/MixPL
mmdet/configs/rtmdet/rtmdet_ins_s_8xb32_300e_coco.py
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of the\n ``img_meta`` dictionary depends on ``meta_keys``. By default this includes:\n\n - ``img_id``: id of the image\n\n - ``img_path``: path to the image file\n\n - ``ori_shape``: original shape of the image as a tuple (h, w)\n\n - ``img_shape``: shape of the image input to the network as a tuple \\\n (h, w). Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n\n - ``scale_factor``: a float indicating the preprocessing scale\n\n - ``flip``: a boolean indicating if image flip transform was used\n\n - ``flip_direction``: the flipping direction\n\n Args:\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')``\n \"\"\"\n mapping_table = {\n 'gt_bboxes': 'bboxes',\n 'gt_bboxes_labels': 'labels',\n 'gt_masks': 'masks'\n }\n\n def __init__(self,\n meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')):\n self.meta_keys = meta_keys\n\n def transform(self, results: dict) -> dict:\n \"\"\"Method to pack the input data.\n\n Args:\n results (dict): Result dict from the data pipeline.\n\n Returns:\n dict:\n\n - 'inputs' (obj:`torch.Tensor`): The forward data of models.\n - 'data_sample' (obj:`DetDataSample`): The annotation info of the\n sample.\n \"\"\"\n packed_results = dict()\n if 'img' in results:\n img = results['img']\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n # To improve the computational speed by by 3-5 times, apply:\n # If image is not contiguous, use\n # `numpy.transpose()` followed by `numpy.ascontiguousarray()`\n # If image is already contiguous, use\n # `torch.permute()` followed by `torch.contiguous()`\n # Refer to https://github.com/open-mmlab/mmdetection/pull/9533\n # for more details\n if not img.flags.c_contiguous:\n img = np.ascontiguousarray(img.transpose(2, 0, 1))\n img = to_tensor(img)\n else:\n img = to_tensor(img).permute(2, 0, 1).contiguous()\n\n packed_results['inputs'] = img\n\n if 'gt_ignore_flags' in results:\n valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]\n ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]\n\n data_sample = DetDataSample()\n instance_data = InstanceData()\n ignore_instance_data = InstanceData()\n\n for key in self.mapping_table.keys():\n if key not in results:\n continue\n if key == 'gt_masks' or isinstance(results[key], BaseBoxes):\n if 'gt_ignore_flags' in results:\n instance_data[\n self.mapping_table[key]] = results[key][valid_idx]\n ignore_instance_data[\n self.mapping_table[key]] = results[key][ignore_idx]\n else:\n instance_data[self.mapping_table[key]] = results[key]\n else:\n if 'gt_ignore_flags' in results:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key][valid_idx])\n ignore_instance_data[self.mapping_table[key]] = to_tensor(\n results[key][ignore_idx])\n else:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key])\n data_sample.gt_instances = instance_data\n data_sample.ignored_instances = ignore_instance_data\n\n if 'proposals' in results:\n proposals = InstanceData(\n 
bboxes=to_tensor(results['proposals']),\n scores=to_tensor(results['proposals_scores']))\n data_sample.proposals = proposals\n\n if 'gt_seg_map' in results:\n gt_sem_seg_data = dict(\n sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))\n gt_sem_seg_data = PixelData(**gt_sem_seg_data)\n if 'ignore_index' in results:\n metainfo = dict(ignore_index=results['ignore_index'])\n gt_sem_seg_data.set_metainfo(metainfo)\n data_sample.gt_sem_seg = gt_sem_seg_data\n\n img_meta = {}\n for key in self.meta_keys:\n if key in results:\n img_meta[key] = results[key]\n data_sample.set_metainfo(img_meta)\n packed_results['data_samples'] = data_sample\n\n return packed_results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(meta_keys={self.meta_keys})'\n return repr_str" }, { "identifier": "FilterAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class FilterAnnotations(BaseTransform):\n \"\"\"Filter invalid annotations.\n\n Required Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n\n Args:\n min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth\n boxes. Default: (1., 1.)\n min_gt_mask_area (int): Minimum foreground area of ground truth masks.\n Default: 1\n by_box (bool): Filter instances with bounding boxes not meeting the\n min_gt_bbox_wh threshold. Default: True\n by_mask (bool): Filter instances with masks not meeting\n min_gt_mask_area threshold. Default: False\n keep_empty (bool): Whether to return None when it\n becomes an empty bbox after filtering. 
Defaults to True.\n \"\"\"\n\n def __init__(self,\n min_gt_bbox_wh: Tuple[int, int] = (1, 1),\n min_gt_mask_area: int = 1,\n by_box: bool = True,\n by_mask: bool = False,\n keep_empty: bool = True) -> None:\n # TODO: add more filter options\n assert by_box or by_mask\n self.min_gt_bbox_wh = min_gt_bbox_wh\n self.min_gt_mask_area = min_gt_mask_area\n self.by_box = by_box\n self.by_mask = by_mask\n self.keep_empty = keep_empty\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to filter annotations.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n assert 'gt_bboxes' in results\n gt_bboxes = results['gt_bboxes']\n if gt_bboxes.shape[0] == 0:\n return results\n\n tests = []\n if self.by_box:\n tests.append(\n ((gt_bboxes.widths > self.min_gt_bbox_wh[0]) &\n (gt_bboxes.heights > self.min_gt_bbox_wh[1])).numpy())\n if self.by_mask:\n assert 'gt_masks' in results\n gt_masks = results['gt_masks']\n tests.append(gt_masks.areas >= self.min_gt_mask_area)\n\n keep = tests[0]\n for t in tests[1:]:\n keep = keep & t\n\n if not keep.any():\n if self.keep_empty:\n return None\n\n keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags')\n for key in keys:\n if key in results:\n results[key] = results[key][keep]\n\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(min_gt_bbox_wh={self.min_gt_bbox_wh}, ' \\\n f'keep_empty={self.keep_empty})'" }, { "identifier": "LoadAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class LoadAnnotations(MMCV_LoadAnnotations):\n \"\"\"Load and process the ``instances`` and ``seg_map`` annotation provided\n by dataset.\n\n The annotation format is as the following:\n\n .. code-block:: python\n\n {\n 'instances':\n [\n {\n # List of 4 numbers representing the bounding box of the\n # instance, in (x1, y1, x2, y2) order.\n 'bbox': [x1, y1, x2, y2],\n\n # Label of image classification.\n 'bbox_label': 1,\n\n # Used in instance/panoptic segmentation. The segmentation mask\n # of the instance or the information of segments.\n # 1. If list[list[float]], it represents a list of polygons,\n # one for each connected component of the object. Each\n # list[float] is one simple polygon in the format of\n # [x1, y1, ..., xn, yn] (n >= 3). The Xs and Ys are absolute\n # coordinates in unit of pixels.\n # 2. If dict, it represents the per-pixel segmentation mask in\n # COCO's compressed RLE format. The dict should have keys\n # “size” and “counts”. Can be loaded by pycocotools\n 'mask': list[list[float]] or dict,\n\n }\n ]\n # Filename of semantic or panoptic segmentation ground truth file.\n 'seg_map_path': 'a/b/c'\n }\n\n After this module, the annotation has been changed to the format below:\n\n .. code-block:: python\n\n {\n # In (x1, y1, x2, y2) order, float type. 
N is the number of bboxes\n # in an image\n 'gt_bboxes': BaseBoxes(N, 4)\n # In int type.\n 'gt_bboxes_labels': np.ndarray(N, )\n # In built-in class\n 'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)\n # In uint8 type.\n 'gt_seg_map': np.ndarray (H, W)\n # in (x, y, v) order, float type.\n }\n\n Required Keys:\n\n - height\n - width\n - instances\n\n - bbox (optional)\n - bbox_label\n - mask (optional)\n - ignore_flag\n\n - seg_map_path (optional)\n\n Added Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32])\n - gt_bboxes_labels (np.int64)\n - gt_masks (BitmapMasks | PolygonMasks)\n - gt_seg_map (np.uint8)\n - gt_ignore_flags (bool)\n\n Args:\n with_bbox (bool): Whether to parse and load the bbox annotation.\n Defaults to True.\n with_label (bool): Whether to parse and load the label annotation.\n Defaults to True.\n with_mask (bool): Whether to parse and load the mask annotation.\n Default: False.\n with_seg (bool): Whether to parse and load the semantic segmentation\n annotation. Defaults to False.\n poly2mask (bool): Whether to convert mask to bitmap. Default: True.\n box_type (str): The box type used to wrap the bboxes. If ``box_type``\n is None, gt_bboxes will keep being np.ndarray. Defaults to 'hbox'.\n reduce_zero_label (bool): Whether reduce all label value\n by 1. Usually used for datasets where 0 is background label.\n Defaults to False.\n ignore_index (int): The label index to be ignored.\n Valid only if reduce_zero_label is true. Defaults is 255.\n imdecode_backend (str): The image decoding backend type. The backend\n argument for :func:``mmcv.imfrombytes``.\n See :fun:``mmcv.imfrombytes`` for details.\n Defaults to 'cv2'.\n backend_args (dict, optional): Arguments to instantiate the\n corresponding backend. Defaults to None.\n \"\"\"\n\n def __init__(\n self,\n with_mask: bool = False,\n poly2mask: bool = True,\n box_type: str = 'hbox',\n # use for semseg\n reduce_zero_label: bool = False,\n ignore_index: int = 255,\n **kwargs) -> None:\n super(LoadAnnotations, self).__init__(**kwargs)\n self.with_mask = with_mask\n self.poly2mask = poly2mask\n self.box_type = box_type\n self.reduce_zero_label = reduce_zero_label\n self.ignore_index = ignore_index\n\n def _load_bboxes(self, results: dict) -> None:\n \"\"\"Private function to load bounding box annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n Returns:\n dict: The dict contains loaded bounding box annotations.\n \"\"\"\n gt_bboxes = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_bboxes.append(instance['bbox'])\n gt_ignore_flags.append(instance['ignore_flag'])\n if self.box_type is None:\n results['gt_bboxes'] = np.array(\n gt_bboxes, dtype=np.float32).reshape((-1, 4))\n else:\n _, box_type_cls = get_box_type(self.box_type)\n results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32)\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n\n def _load_labels(self, results: dict) -> None:\n \"\"\"Private function to load label annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded label annotations.\n \"\"\"\n gt_bboxes_labels = []\n for instance in results.get('instances', []):\n gt_bboxes_labels.append(instance['bbox_label'])\n # TODO: Inconsistent with mmcv, consider how to deal with it later.\n results['gt_bboxes_labels'] = np.array(\n gt_bboxes_labels, dtype=np.int64)\n\n def _poly2mask(self, mask_ann: Union[list, dict], img_h: int,\n img_w: int) -> 
np.ndarray:\n \"\"\"Private function to convert masks represented with polygon to\n bitmaps.\n\n Args:\n mask_ann (list | dict): Polygon mask annotation input.\n img_h (int): The height of output mask.\n img_w (int): The width of output mask.\n\n Returns:\n np.ndarray: The decode bitmap mask of shape (img_h, img_w).\n \"\"\"\n\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n rle = maskUtils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = maskUtils.decode(rle)\n return mask\n\n def _process_masks(self, results: dict) -> list:\n \"\"\"Process gt_masks and filter invalid polygons.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n list: Processed gt_masks.\n \"\"\"\n gt_masks = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_mask = instance['mask']\n # If the annotation of segmentation mask is invalid,\n # ignore the whole instance.\n if isinstance(gt_mask, list):\n gt_mask = [\n np.array(polygon) for polygon in gt_mask\n if len(polygon) % 2 == 0 and len(polygon) >= 6\n ]\n if len(gt_mask) == 0:\n # ignore this instance and set gt_mask to a fake mask\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif not self.poly2mask:\n # `PolygonMasks` requires a ploygon of format List[np.array],\n # other formats are invalid.\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif isinstance(gt_mask, dict) and \\\n not (gt_mask.get('counts') is not None and\n gt_mask.get('size') is not None and\n isinstance(gt_mask['counts'], (list, str))):\n # if gt_mask is a dict, it should include `counts` and `size`,\n # so that `BitmapMasks` can uncompressed RLE\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n gt_masks.append(gt_mask)\n # re-process gt_ignore_flags\n gt_ignore_flags.append(instance['ignore_flag'])\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n return gt_masks\n\n def _load_masks(self, results: dict) -> None:\n \"\"\"Private function to load mask annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n \"\"\"\n h, w = results['ori_shape']\n gt_masks = self._process_masks(results)\n if self.poly2mask:\n gt_masks = BitmapMasks(\n [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)\n else:\n # fake polygon masks will be ignored in `PackDetInputs`\n gt_masks = PolygonMasks([mask for mask in gt_masks], h, w)\n results['gt_masks'] = gt_masks\n\n def _load_seg_map(self, results: dict) -> None:\n \"\"\"Private function to load semantic segmentation annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmcv.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded semantic segmentation annotations.\n \"\"\"\n if results.get('seg_map_path', None) is None:\n return\n\n img_bytes = get(\n results['seg_map_path'], backend_args=self.backend_args)\n gt_semantic_seg = mmcv.imfrombytes(\n img_bytes, flag='unchanged',\n backend=self.imdecode_backend).squeeze()\n\n if self.reduce_zero_label:\n # avoid using underflow conversion\n gt_semantic_seg[gt_semantic_seg == 0] = self.ignore_index\n gt_semantic_seg = gt_semantic_seg - 1\n gt_semantic_seg[gt_semantic_seg == self.ignore_index -\n 1] = self.ignore_index\n\n # modify if custom classes\n if results.get('label_map', None) is 
not None:\n # Add deep copy to solve bug of repeatedly\n # replace `gt_semantic_seg`, which is reported in\n # https://github.com/open-mmlab/mmsegmentation/pull/1445/\n gt_semantic_seg_copy = gt_semantic_seg.copy()\n for old_id, new_id in results['label_map'].items():\n gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id\n results['gt_seg_map'] = gt_semantic_seg\n results['ignore_index'] = self.ignore_index\n\n def transform(self, results: dict) -> dict:\n \"\"\"Function to load multiple types annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded bounding box, label and\n semantic segmentation.\n \"\"\"\n\n if self.with_bbox:\n self._load_bboxes(results)\n if self.with_label:\n self._load_labels(results)\n if self.with_mask:\n self._load_masks(results)\n if self.with_seg:\n self._load_seg_map(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(with_bbox={self.with_bbox}, '\n repr_str += f'with_label={self.with_label}, '\n repr_str += f'with_mask={self.with_mask}, '\n repr_str += f'with_seg={self.with_seg}, '\n repr_str += f'poly2mask={self.poly2mask}, '\n repr_str += f\"imdecode_backend='{self.imdecode_backend}', \"\n repr_str += f'backend_args={self.backend_args})'\n return repr_str" }, { "identifier": "CachedMixUp", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class CachedMixUp(BaseTransform):\n \"\"\"Cached mixup data augmentation.\n\n .. code:: text\n\n mixup transform\n +------------------------------+\n | mixup image | |\n | +--------|--------+ |\n | | | | |\n |---------------+ | |\n | | | |\n | | image | |\n | | | |\n | | | |\n | |-----------------+ |\n | pad |\n +------------------------------+\n\n The cached mixup transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Another random image is picked from the cache and embedded in\n the top left patch(after padding and resizing)\n 3. The target of mixup transform is the weighted average of mixup\n image and origin image.\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n - mix_results (List[dict])\n\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n\n Args:\n img_scale (Sequence[int]): Image output size after mixup pipeline.\n The shape order should be (width, height). Defaults to (640, 640).\n ratio_range (Sequence[float]): Scale ratio of mixup image.\n Defaults to (0.5, 1.5).\n flip_ratio (float): Horizontal flip ratio of mixup image.\n Defaults to 0.5.\n pad_val (int): Pad value. Defaults to 114.\n max_iters (int): The maximum number of iterations. If the number of\n iterations is greater than `max_iters`, but gt_bbox is still\n empty, then the iteration is terminated. Defaults to 15.\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. 
Defaults to 20.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. If set to False, use FIFO popping method.\n Defaults to True.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n \"\"\"\n\n def __init__(self,\n img_scale: Tuple[int, int] = (640, 640),\n ratio_range: Tuple[float, float] = (0.5, 1.5),\n flip_ratio: float = 0.5,\n pad_val: float = 114.0,\n max_iters: int = 15,\n bbox_clip_border: bool = True,\n max_cached_images: int = 20,\n random_pop: bool = True,\n prob: float = 1.0) -> None:\n assert isinstance(img_scale, tuple)\n assert max_cached_images >= 2, 'The length of cache must >= 2, ' \\\n f'but got {max_cached_images}.'\n assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \\\n f'got {prob}.'\n self.dynamic_scale = img_scale\n self.ratio_range = ratio_range\n self.flip_ratio = flip_ratio\n self.pad_val = pad_val\n self.max_iters = max_iters\n self.bbox_clip_border = bbox_clip_border\n self.results_cache = []\n\n self.max_cached_images = max_cached_images\n self.random_pop = random_pop\n self.prob = prob\n\n @cache_randomness\n def get_indexes(self, cache: list) -> int:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The result cache.\n\n Returns:\n int: index.\n \"\"\"\n\n for i in range(self.max_iters):\n index = random.randint(0, len(cache) - 1)\n gt_bboxes_i = cache[index]['gt_bboxes']\n if len(gt_bboxes_i) != 0:\n break\n return index\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"MixUp transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 1:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n\n index = self.get_indexes(self.results_cache)\n retrieve_results = copy.deepcopy(self.results_cache[index])\n\n # TODO: refactor mixup to reuse these code.\n if retrieve_results['gt_bboxes'].shape[0] == 0:\n # empty bbox\n return results\n\n retrieve_img = retrieve_results['img']\n with_mask = True if 'gt_masks' in results else False\n\n jit_factor = random.uniform(*self.ratio_range)\n is_flip = random.uniform(0, 1) > self.flip_ratio\n\n if len(retrieve_img.shape) == 3:\n out_img = np.ones(\n (self.dynamic_scale[1], self.dynamic_scale[0], 3),\n dtype=retrieve_img.dtype) * self.pad_val\n else:\n out_img = np.ones(\n self.dynamic_scale[::-1],\n dtype=retrieve_img.dtype) * self.pad_val\n\n # 1. keep_ratio resize\n scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],\n self.dynamic_scale[0] / retrieve_img.shape[1])\n retrieve_img = mmcv.imresize(\n retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),\n int(retrieve_img.shape[0] * scale_ratio)))\n\n # 2. paste\n out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img\n\n # 3. scale jit\n scale_ratio *= jit_factor\n out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),\n int(out_img.shape[0] * jit_factor)))\n\n # 4. flip\n if is_flip:\n out_img = out_img[:, ::-1, :]\n\n # 5. 
random crop\n ori_img = results['img']\n origin_h, origin_w = out_img.shape[:2]\n target_h, target_w = ori_img.shape[:2]\n padded_img = np.ones((max(origin_h, target_h), max(\n origin_w, target_w), 3)) * self.pad_val\n padded_img = padded_img.astype(np.uint8)\n padded_img[:origin_h, :origin_w] = out_img\n\n x_offset, y_offset = 0, 0\n if padded_img.shape[0] > target_h:\n y_offset = random.randint(0, padded_img.shape[0] - target_h)\n if padded_img.shape[1] > target_w:\n x_offset = random.randint(0, padded_img.shape[1] - target_w)\n padded_cropped_img = padded_img[y_offset:y_offset + target_h,\n x_offset:x_offset + target_w]\n\n # 6. adjust bbox\n retrieve_gt_bboxes = retrieve_results['gt_bboxes']\n retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])\n if with_mask:\n retrieve_gt_masks = retrieve_results['gt_masks'].rescale(\n scale_ratio)\n\n if self.bbox_clip_border:\n retrieve_gt_bboxes.clip_([origin_h, origin_w])\n\n if is_flip:\n retrieve_gt_bboxes.flip_([origin_h, origin_w],\n direction='horizontal')\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.flip()\n\n # 7. filter\n cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()\n cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-x_offset,\n direction='horizontal')\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-y_offset,\n direction='vertical')\n\n if self.bbox_clip_border:\n cp_retrieve_gt_bboxes.clip_([target_h, target_w])\n\n # 8. mix up\n ori_img = ori_img.astype(np.float32)\n mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']\n retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']\n\n mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(\n (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)\n mixup_gt_bboxes_labels = np.concatenate(\n (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)\n mixup_gt_ignore_flags = np.concatenate(\n (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)\n if with_mask:\n mixup_gt_masks = retrieve_gt_masks.cat(\n [results['gt_masks'], retrieve_gt_masks])\n\n # remove outside bbox\n inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()\n mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]\n mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]\n if with_mask:\n mixup_gt_masks = mixup_gt_masks[inside_inds]\n\n results['img'] = mixup_img.astype(np.uint8)\n results['img_shape'] = mixup_img.shape[:2]\n results['gt_bboxes'] = mixup_gt_bboxes\n results['gt_bboxes_labels'] = mixup_gt_bboxes_labels\n results['gt_ignore_flags'] = mixup_gt_ignore_flags\n if with_mask:\n results['gt_masks'] = mixup_gt_masks\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(dynamic_scale={self.dynamic_scale}, '\n repr_str += f'ratio_range={self.ratio_range}, '\n repr_str += f'flip_ratio={self.flip_ratio}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'max_iters={self.max_iters}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop}, '\n repr_str += f'prob={self.prob})'\n return repr_str" }, { "identifier": "CachedMosaic", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class 
CachedMosaic(Mosaic):\n \"\"\"Cached mosaic augmentation.\n\n Cached mosaic transform will random select images from the cache\n and combine them into one output image.\n\n .. code:: text\n\n mosaic transform\n center_x\n +------------------------------+\n | pad | pad |\n | +-----------+ |\n | | | |\n | | image1 |--------+ |\n | | | | |\n | | | image2 | |\n center_y |----+-------------+-----------|\n | | cropped | |\n |pad | image3 | image4 |\n | | | |\n +----|-------------+-----------+\n | |\n +-------------+\n\n The cached mosaic transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Choose the mosaic center as the intersections of 4 images\n 3. Get the left top image according to the index, and randomly\n sample another 3 images from the result cache.\n 4. Sub image will be cropped if image is larger than mosaic patch\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n Args:\n img_scale (Sequence[int]): Image size before mosaic pipeline of single\n image. The shape order should be (width, height).\n Defaults to (640, 640).\n center_ratio_range (Sequence[float]): Center ratio range of mosaic\n output. Defaults to (0.5, 1.5).\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n pad_val (int): Pad value. Defaults to 114.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. Defaults to 40.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. 
If set to False, use FIFO popping method.\n Defaults to True.\n \"\"\"\n\n def __init__(self,\n *args,\n max_cached_images: int = 40,\n random_pop: bool = True,\n **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.results_cache = []\n self.random_pop = random_pop\n assert max_cached_images >= 4, 'The length of cache must >= 4, ' \\\n f'but got {max_cached_images}.'\n self.max_cached_images = max_cached_images\n\n @cache_randomness\n def get_indexes(self, cache: list) -> list:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The results cache.\n\n Returns:\n list: indexes.\n \"\"\"\n\n indexes = [random.randint(0, len(cache) - 1) for _ in range(3)]\n return indexes\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Mosaic transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 4:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n indices = self.get_indexes(self.results_cache)\n mix_results = [copy.deepcopy(self.results_cache[i]) for i in indices]\n\n # TODO: refactor mosaic to reuse these code.\n mosaic_bboxes = []\n mosaic_bboxes_labels = []\n mosaic_ignore_flags = []\n mosaic_masks = []\n with_mask = True if 'gt_masks' in results else False\n\n if len(results['img'].shape) == 3:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),\n self.pad_val,\n dtype=results['img'].dtype)\n else:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),\n self.pad_val,\n dtype=results['img'].dtype)\n\n # mosaic center x, y\n center_x = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[0])\n center_y = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[1])\n center_position = (center_x, center_y)\n\n loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n for i, loc in enumerate(loc_strs):\n if loc == 'top_left':\n results_patch = copy.deepcopy(results)\n else:\n results_patch = copy.deepcopy(mix_results[i - 1])\n\n img_i = results_patch['img']\n h_i, w_i = img_i.shape[:2]\n # keep_ratio resize\n scale_ratio_i = min(self.img_scale[1] / h_i,\n self.img_scale[0] / w_i)\n img_i = mmcv.imresize(\n img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))\n\n # compute the combine parameters\n paste_coord, crop_coord = self._mosaic_combine(\n loc, center_position, img_i.shape[:2][::-1])\n x1_p, y1_p, x2_p, y2_p = paste_coord\n x1_c, y1_c, x2_c, y2_c = crop_coord\n\n # crop and paste image\n mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n # adjust coordinate\n gt_bboxes_i = results_patch['gt_bboxes']\n gt_bboxes_labels_i = results_patch['gt_bboxes_labels']\n gt_ignore_flags_i = results_patch['gt_ignore_flags']\n\n padw = x1_p - x1_c\n padh = y1_p - y1_c\n gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])\n gt_bboxes_i.translate_([padw, padh])\n mosaic_bboxes.append(gt_bboxes_i)\n mosaic_bboxes_labels.append(gt_bboxes_labels_i)\n mosaic_ignore_flags.append(gt_ignore_flags_i)\n if with_mask and results_patch.get('gt_masks', None) is not None:\n gt_masks_i = results_patch['gt_masks']\n gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))\n gt_masks_i = 
gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padw,\n direction='horizontal')\n gt_masks_i = gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padh,\n direction='vertical')\n mosaic_masks.append(gt_masks_i)\n\n mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)\n mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)\n mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)\n\n if self.bbox_clip_border:\n mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])\n # remove outside bboxes\n inside_inds = mosaic_bboxes.is_inside(\n [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()\n mosaic_bboxes = mosaic_bboxes[inside_inds]\n mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]\n mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]\n\n results['img'] = mosaic_img\n results['img_shape'] = mosaic_img.shape[:2]\n results['gt_bboxes'] = mosaic_bboxes\n results['gt_bboxes_labels'] = mosaic_bboxes_labels\n results['gt_ignore_flags'] = mosaic_ignore_flags\n\n if with_mask:\n mosaic_masks = mosaic_masks[0].cat(mosaic_masks)\n results['gt_masks'] = mosaic_masks[inside_inds]\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(img_scale={self.img_scale}, '\n repr_str += f'center_ratio_range={self.center_ratio_range}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'prob={self.prob}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop})'\n return repr_str" }, { "identifier": "Pad", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Pad(MMCV_Pad):\n \"\"\"Pad the image & segmentation map.\n\n There are three padding modes: (1) pad to a fixed size and (2) pad to the\n minimum size that is divisible by some number. and (3)pad to square. Also,\n pad to square and pad to the minimum size can be used as the same time.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - pad_shape\n - pad_fixed_size\n - pad_size_divisor\n\n Args:\n size (tuple, optional): Fixed padding size.\n Expected padding shape (width, height). Defaults to None.\n size_divisor (int, optional): The divisor of padded size. Defaults to\n None.\n pad_to_square (bool): Whether to pad the image into a square.\n Currently only used for YOLOX. Defaults to False.\n pad_val (Number | dict[str, Number], optional) - Padding value for if\n the pad_mode is \"constant\". If it is a single number, the value\n to pad the image is the number and to pad the semantic\n segmentation map is 255. If it is a dict, it should have the\n following keys:\n\n - img: The value to pad the image.\n - seg: The value to pad the semantic segmentation map.\n Defaults to dict(img=0, seg=255).\n padding_mode (str): Type of padding. Should be: constant, edge,\n reflect or symmetric. Defaults to 'constant'.\n\n - constant: pads with a constant value, this value is specified\n with pad_val.\n - edge: pads with the last value at the edge of the image.\n - reflect: pads with reflection of image without repeating the last\n value on the edge. 
For example, padding [1, 2, 3, 4] with 2\n elements on both sides in reflect mode will result in\n [3, 2, 1, 2, 3, 4, 3, 2].\n - symmetric: pads with reflection of image repeating the last value\n on the edge. For example, padding [1, 2, 3, 4] with 2 elements on\n both sides in symmetric mode will result in\n [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def _pad_masks(self, results: dict) -> None:\n \"\"\"Pad masks according to ``results['pad_shape']``.\"\"\"\n if results.get('gt_masks', None) is not None:\n pad_val = self.pad_val.get('masks', 0)\n pad_shape = results['pad_shape'][:2]\n results['gt_masks'] = results['gt_masks'].pad(\n pad_shape, pad_val=pad_val)\n\n def transform(self, results: dict) -> dict:\n \"\"\"Call function to pad images, masks, semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n self._pad_img(results)\n self._pad_seg(results)\n self._pad_masks(results)\n return results" }, { "identifier": "RandomCrop", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomCrop(BaseTransform):\n \"\"\"Random crop the image & bboxes & masks.\n\n The absolute ``crop_size`` is sampled based on ``crop_type`` and\n ``image_size``, then the cropped results are generated.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n - gt_seg_map (optional)\n - gt_instances_ids (options, only used in MOT/VIS)\n\n Added Keys:\n\n - homography_matrix\n\n Args:\n crop_size (tuple): The relative ratio or absolute pixels of\n (width, height).\n crop_type (str, optional): One of \"relative_range\", \"relative\",\n \"absolute\", \"absolute_range\". \"relative\" randomly crops\n (h * crop_size[0], w * crop_size[1]) part from an input of size\n (h, w). \"relative_range\" uniformly samples relative crop size from\n range [crop_size[0], 1] and [crop_size[1], 1] for height and width\n respectively. \"absolute\" crops from an input with absolute size\n (crop_size[0], crop_size[1]). \"absolute_range\" uniformly samples\n crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w\n in range [crop_size[0], min(w, crop_size[1])].\n Defaults to \"absolute\".\n allow_negative_crop (bool, optional): Whether to allow a crop that does\n not contain any bbox area. Defaults to False.\n recompute_bbox (bool, optional): Whether to re-compute the boxes based\n on cropped instance masks. Defaults to False.\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. Defaults to True.\n\n Note:\n - If the image is smaller than the absolute crop size, return the\n original image.\n - The keys for bboxes, labels and masks must be aligned. 
That is,\n ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and\n ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and\n ``gt_masks_ignore``.\n - If the crop does not contain any gt-bbox region and\n ``allow_negative_crop`` is set to False, skip this image.\n \"\"\"\n\n def __init__(self,\n crop_size: tuple,\n crop_type: str = 'absolute',\n allow_negative_crop: bool = False,\n recompute_bbox: bool = False,\n bbox_clip_border: bool = True) -> None:\n if crop_type not in [\n 'relative_range', 'relative', 'absolute', 'absolute_range'\n ]:\n raise ValueError(f'Invalid crop_type {crop_type}.')\n if crop_type in ['absolute', 'absolute_range']:\n assert crop_size[0] > 0 and crop_size[1] > 0\n assert isinstance(crop_size[0], int) and isinstance(\n crop_size[1], int)\n if crop_type == 'absolute_range':\n assert crop_size[0] <= crop_size[1]\n else:\n assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1\n self.crop_size = crop_size\n self.crop_type = crop_type\n self.allow_negative_crop = allow_negative_crop\n self.bbox_clip_border = bbox_clip_border\n self.recompute_bbox = recompute_bbox\n\n def _crop_data(self, results: dict, crop_size: Tuple[int, int],\n allow_negative_crop: bool) -> Union[dict, None]:\n \"\"\"Function to randomly crop images, bounding boxes, masks, semantic\n segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n crop_size (Tuple[int, int]): Expected absolute size after\n cropping, (h, w).\n allow_negative_crop (bool): Whether to allow a crop that does not\n contain any bbox area.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n assert crop_size[0] > 0 and crop_size[1] > 0\n img = results['img']\n margin_h = max(img.shape[0] - crop_size[0], 0)\n margin_w = max(img.shape[1] - crop_size[1], 0)\n offset_h, offset_w = self._rand_offset((margin_h, margin_w))\n crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n # Record the homography matrix for the RandomCrop\n homography_matrix = np.array(\n [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]],\n dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results['img'] = img\n results['img_shape'] = img_shape[:2]\n\n # crop bboxes accordingly and clip to the image boundary\n if results.get('gt_bboxes', None) is not None:\n bboxes = results['gt_bboxes']\n bboxes.translate_([-offset_w, -offset_h])\n if self.bbox_clip_border:\n bboxes.clip_(img_shape[:2])\n valid_inds = bboxes.is_inside(img_shape[:2]).numpy()\n # If the crop does not contain any gt-bbox area and\n # allow_negative_crop is False, skip this image.\n if (not valid_inds.any() and not allow_negative_crop):\n return None\n\n results['gt_bboxes'] = bboxes[valid_inds]\n\n if results.get('gt_ignore_flags', None) is not None:\n results['gt_ignore_flags'] = \\\n results['gt_ignore_flags'][valid_inds]\n\n if results.get('gt_bboxes_labels', None) is not None:\n results['gt_bboxes_labels'] = \\\n results['gt_bboxes_labels'][valid_inds]\n\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'][\n valid_inds.nonzero()[0]].crop(\n 
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n if self.recompute_bbox:\n results['gt_bboxes'] = results['gt_masks'].get_bboxes(\n type(results['gt_bboxes']))\n\n # We should remove the instance ids corresponding to invalid boxes.\n if results.get('gt_instances_ids', None) is not None:\n results['gt_instances_ids'] = \\\n results['gt_instances_ids'][valid_inds]\n\n # crop semantic seg\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,\n crop_x1:crop_x2]\n\n return results\n\n @cache_randomness\n def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generate crop offset.\n\n Args:\n margin (Tuple[int, int]): The upper bound for the offset generated\n randomly.\n\n Returns:\n Tuple[int, int]: The random offset for the crop.\n \"\"\"\n margin_h, margin_w = margin\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n\n return offset_h, offset_w\n\n @cache_randomness\n def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generates the absolute crop size based on `crop_type` and\n `image_size`.\n\n Args:\n image_size (Tuple[int, int]): (h, w).\n\n Returns:\n crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.\n \"\"\"\n h, w = image_size\n if self.crop_type == 'absolute':\n return min(self.crop_size[1], h), min(self.crop_size[0], w)\n elif self.crop_type == 'absolute_range':\n crop_h = np.random.randint(\n min(h, self.crop_size[0]),\n min(h, self.crop_size[1]) + 1)\n crop_w = np.random.randint(\n min(w, self.crop_size[0]),\n min(w, self.crop_size[1]) + 1)\n return crop_h, crop_w\n elif self.crop_type == 'relative':\n crop_w, crop_h = self.crop_size\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n else:\n # 'relative_range'\n crop_size = np.asarray(self.crop_size, dtype=np.float32)\n crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to randomly crop images, bounding boxes, masks,\n semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n image_size = results['img'].shape[:2]\n crop_size = self._get_crop_size(image_size)\n results = self._crop_data(results, crop_size, self.allow_negative_crop)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(crop_size={self.crop_size}, '\n repr_str += f'crop_type={self.crop_type}, '\n repr_str += f'allow_negative_crop={self.allow_negative_crop}, '\n repr_str += f'recompute_bbox={self.recompute_bbox}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str" }, { "identifier": "RandomFlip", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomFlip(MMCV_RandomFlip):\n \"\"\"Flip the image & bbox & mask & segmentation map. Added or Updated keys:\n flip, flip_direction, img, gt_bboxes, and gt_seg_map. 
There are 3 flip\n modes:\n\n - ``prob`` is float, ``direction`` is string: the image will be\n ``direction``ly flipped with probability of ``prob`` .\n E.g., ``prob=0.5``, ``direction='horizontal'``,\n then image will be horizontally flipped with probability of 0.5.\n - ``prob`` is float, ``direction`` is list of string: the image will\n be ``direction[i]``ly flipped with probability of\n ``prob/len(direction)``.\n E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``,\n then image will be horizontally flipped with probability of 0.25,\n vertically with probability of 0.25.\n - ``prob`` is list of float, ``direction`` is list of string:\n given ``len(prob) == len(direction)``, the image will\n be ``direction[i]``ly flipped with probability of ``prob[i]``.\n E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal',\n 'vertical']``, then image will be horizontally flipped with\n probability of 0.3, vertically with probability of 0.5.\n\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - flip\n - flip_direction\n - homography_matrix\n\n\n Args:\n prob (float | list[float], optional): The flipping probability.\n Defaults to None.\n direction(str | list[str]): The flipping direction. Options\n If input is a list, the length must equal ``prob``. Each\n element in ``prob`` indicates the flip probability of\n corresponding direction. Defaults to 'horizontal'.\n \"\"\"\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the RandomFlip.\"\"\"\n cur_dir = results['flip_direction']\n h, w = results['img'].shape[:2]\n\n if cur_dir == 'horizontal':\n homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'vertical':\n homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'diagonal':\n homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n else:\n homography_matrix = np.eye(3, dtype=np.float32)\n\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def _flip(self, results: dict) -> None:\n \"\"\"Flip images, bounding boxes, and semantic segmentation map.\"\"\"\n # flip image\n results['img'] = mmcv.imflip(\n results['img'], direction=results['flip_direction'])\n\n img_shape = results['img'].shape[:2]\n\n # flip bboxes\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].flip_(img_shape, results['flip_direction'])\n\n # flip masks\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'].flip(\n results['flip_direction'])\n\n # flip segs\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = mmcv.imflip(\n results['gt_seg_map'], direction=results['flip_direction'])\n\n # record homography matrix for flip\n self._record_homography_matrix(results)" }, { "identifier": "Resize", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Resize(MMCV_Resize):\n \"\"\"Resize images & bbox & seg.\n\n This transform resizes the input image according to ``scale`` or\n ``scale_factor``. 
Bboxes, masks, and seg map are then resized\n with the same scale factor.\n if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to\n resize.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n\n Added Keys:\n\n - scale\n - scale_factor\n - keep_ratio\n - homography_matrix\n\n Args:\n scale (int or tuple): Images scales for resizing. Defaults to None\n scale_factor (float or tuple[float]): Scale factors for resizing.\n Defaults to None.\n keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n image. Defaults to False.\n clip_object_border (bool): Whether to clip the objects\n outside the border of the image. In some dataset like MOT17, the gt\n bboxes are allowed to cross the border of images. Therefore, we\n don't need to clip the gt bboxes in these cases. Defaults to True.\n backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n These two backends generates slightly different results. Defaults\n to 'cv2'.\n interpolation (str): Interpolation method, accepted values are\n \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n to 'bilinear'.\n \"\"\"\n\n def _resize_masks(self, results: dict) -> None:\n \"\"\"Resize masks with ``results['scale']``\"\"\"\n if results.get('gt_masks', None) is not None:\n if self.keep_ratio:\n results['gt_masks'] = results['gt_masks'].rescale(\n results['scale'])\n else:\n results['gt_masks'] = results['gt_masks'].resize(\n results['img_shape'])\n\n def _resize_bboxes(self, results: dict) -> None:\n \"\"\"Resize bounding boxes with ``results['scale_factor']``.\"\"\"\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].rescale_(results['scale_factor'])\n if self.clip_object_border:\n results['gt_bboxes'].clip_(results['img_shape'])\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the Resize.\"\"\"\n w_scale, h_scale = results['scale_factor']\n homography_matrix = np.array(\n [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Transform function to resize images, bounding boxes and semantic\n segmentation map.\n\n Args:\n results (dict): Result dict from loading pipeline.\n Returns:\n dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',\n 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys\n are updated in result dict.\n \"\"\"\n if self.scale:\n results['scale'] = self.scale\n else:\n img_shape = results['img'].shape[:2]\n results['scale'] = _scale_size(img_shape[::-1], self.scale_factor)\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n self._record_homography_matrix(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(scale={self.scale}, '\n repr_str += f'scale_factor={self.scale_factor}, '\n repr_str += f'keep_ratio={self.keep_ratio}, '\n repr_str += f'clip_object_border={self.clip_object_border}), '\n repr_str += 
f'backend={self.backend}), '\n repr_str += f'interpolation={self.interpolation})'\n return repr_str" }, { "identifier": "YOLOXHSVRandomAug", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class YOLOXHSVRandomAug(BaseTransform):\n \"\"\"Apply HSV augmentation to image sequentially. It is referenced from\n https://github.com/Megvii-\n BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.\n\n Required Keys:\n\n - img\n\n Modified Keys:\n\n - img\n\n Args:\n hue_delta (int): delta of hue. Defaults to 5.\n saturation_delta (int): delta of saturation. Defaults to 30.\n value_delta (int): delat of value. Defaults to 30.\n \"\"\"\n\n def __init__(self,\n hue_delta: int = 5,\n saturation_delta: int = 30,\n value_delta: int = 30) -> None:\n self.hue_delta = hue_delta\n self.saturation_delta = saturation_delta\n self.value_delta = value_delta\n\n @cache_randomness\n def _get_hsv_gains(self):\n hsv_gains = np.random.uniform(-1, 1, 3) * [\n self.hue_delta, self.saturation_delta, self.value_delta\n ]\n # random selection of h, s, v\n hsv_gains *= np.random.randint(0, 2, 3)\n # prevent overflow\n hsv_gains = hsv_gains.astype(np.int16)\n return hsv_gains\n\n def transform(self, results: dict) -> dict:\n img = results['img']\n hsv_gains = self._get_hsv_gains()\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)\n\n img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180\n img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)\n img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)\n cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)\n\n results['img'] = img\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(hue_delta={self.hue_delta}, '\n repr_str += f'saturation_delta={self.saturation_delta}, '\n repr_str += f'value_delta={self.value_delta})'\n return repr_str" }, { "identifier": "PipelineSwitchHook", "path": "mmdet/engine/hooks/pipeline_switch_hook.py", "snippet": "class PipelineSwitchHook(Hook):\n \"\"\"Switch data pipeline at switch_epoch.\n\n Args:\n switch_epoch (int): switch pipeline at this epoch.\n switch_pipeline (list[dict]): the pipeline to switch to.\n \"\"\"\n\n def __init__(self, switch_epoch, switch_pipeline):\n self.switch_epoch = switch_epoch\n self.switch_pipeline = switch_pipeline\n self._restart_dataloader = False\n self._has_switched = False\n\n def before_train_epoch(self, runner):\n \"\"\"switch pipeline.\"\"\"\n epoch = runner.epoch\n train_loader = runner.train_dataloader\n if epoch >= self.switch_epoch and not self._has_switched:\n runner.logger.info('Switch pipeline now!')\n # The dataset pipeline cannot be updated when persistent_workers\n # is True, so we need to force the dataloader's multi-process\n # restart. 
This is a very hacky approach.\n train_loader.dataset.pipeline = Compose(self.switch_pipeline)\n if hasattr(train_loader, 'persistent_workers'\n ) and train_loader.persistent_workers is True:\n train_loader._DataLoader__initialized = False\n train_loader._iterator = None\n self._restart_dataloader = True\n self._has_switched = True\n else:\n # Once the restart is complete, we need to restore\n # the initialization flag.\n if self._restart_dataloader:\n train_loader._DataLoader__initialized = True" }, { "identifier": "ExpMomentumEMA", "path": "mmdet/models/layers/ema.py", "snippet": "class ExpMomentumEMA(ExponentialMovingAverage):\n \"\"\"Exponential moving average (EMA) with exponential momentum strategy,\n which is used in YOLOX.\n\n Args:\n model (nn.Module): The model to be averaged.\n momentum (float): The momentum used for updating ema parameter.\n Ema's parameter are updated with the formula:\n `averaged_param = (1-momentum) * averaged_param + momentum *\n source_param`. Defaults to 0.0002.\n gamma (int): Use a larger momentum early in training and gradually\n annealing to a smaller value to update the ema model smoothly. The\n momentum is calculated as\n `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.\n Defaults to 2000.\n interval (int): Interval between two updates. Defaults to 1.\n device (torch.device, optional): If provided, the averaged model will\n be stored on the :attr:`device`. Defaults to None.\n update_buffers (bool): if True, it will compute running averages for\n both the parameters and the buffers of the model. Defaults to\n False.\n \"\"\"\n\n def __init__(self,\n model: nn.Module,\n momentum: float = 0.0002,\n gamma: int = 2000,\n interval=1,\n device: Optional[torch.device] = None,\n update_buffers: bool = False) -> None:\n super().__init__(\n model=model,\n momentum=momentum,\n interval=interval,\n device=device,\n update_buffers=update_buffers)\n assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'\n self.gamma = gamma\n\n def avg_func(self, averaged_param: Tensor, source_param: Tensor,\n steps: int) -> None:\n \"\"\"Compute the moving average of the parameters using the exponential\n momentum strategy.\n\n Args:\n averaged_param (Tensor): The averaged parameters.\n source_param (Tensor): The source parameters.\n steps (int): The number of times the parameters have been\n updated.\n \"\"\"\n momentum = (1 - self.momentum) * math.exp(\n -float(1 + steps) / self.gamma) + self.momentum\n averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)" } ]
from mmengine.config import read_base from .rtmdet_ins_l_8xb32_300e_coco import * from mmcv.transforms.loading import LoadImageFromFile from mmcv.transforms.processing import RandomResize from mmengine.hooks.ema_hook import EMAHook from mmdet.datasets.transforms.formatting import PackDetInputs from mmdet.datasets.transforms.loading import (FilterAnnotations, LoadAnnotations) from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug) from mmdet.engine.hooks.pipeline_switch_hook import PipelineSwitchHook from mmdet.models.layers.ema import ExpMomentumEMA
18177
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict(
# Copyright (c) OpenMMLab. All rights reserved. # Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa # mmcv >= 2.0.1 # mmengine >= 0.8.0 with read_base(): checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa model.update( dict( backbone=dict( deepen_factor=0.33, widen_factor=0.5, init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict(in_channels=128, feat_channels=128))) train_pipeline = [ dict(type=LoadImageFromFile, backend_args=backend_args), dict( type=LoadAnnotations, with_bbox=True, with_mask=True, poly2mask=False), dict(type=CachedMosaic, img_scale=(640, 640), pad_val=114.0), dict( type=RandomResize, scale=(1280, 1280), ratio_range=(0.5, 2.0), resize_type=Resize, keep_ratio=True), dict(
type=RandomCrop,
6
2023-11-30 08:58:00+00:00
24k
SEU-ProactiveSecurity-Group/MalPurifier
core/defense/amd_dnn_plus.py
[ { "identifier": "Max", "path": "core/attack/max.py", "snippet": "class Max(BaseAttack):\n \"\"\"\n Max攻击:迭代地从多个攻击方法中选择结果。\n\n 参数\n --------\n @param attack_list: List, 已实例化的攻击对象的列表。\n @param varepsilon: Float, 用于判断收敛性的标量。\n \"\"\"\n\n def __init__(self, attack_list, varepsilon=1e-20,\n is_attacker=True, oblivion=False, kappa=1., manipulation_x=None, omega=None, device=None):\n \"\"\"\n 构造函数\n\n 参数:\n - attack_list: 已实例化的攻击对象的列表,至少应该有一个攻击方法。\n - varepsilon: 用于判断收敛性的标量,默认值为1e-20。\n - is_attacker: Bool, 表示是否为攻击者,默认为True。\n - oblivion: Bool, 一个布尔标志(其功能在这里并未详细说明),默认为False。\n - kappa: Float, 一个浮点数参数,默认为1。\n - manipulation_x: 可能与数据的处理或操纵有关,具体用途未详细说明。\n - omega: 参数omega的具体用途未详细说明。\n - device: 设备,例如'cuda'或'cpu',用于执行计算。\n\n 注意:\n - 在初始化过程中,会首先检查`attack_list`是否包含至少一个攻击对象。\n \"\"\"\n super(Max, self).__init__(is_attacker, oblivion, kappa, manipulation_x, omega, device) # 调用父类的构造函数\n assert len(attack_list) > 0, '至少需要一个攻击方法。' # 确保提供了至少一个攻击对象\n self.attack_list = attack_list # 设置攻击列表\n self.varepsilon = varepsilon # 设置varepsilon值\n self.device = device # 设置计算设备\n\n def perturb(self, model, x, label=None, steps_max=5, min_lambda_=1e-5, max_lambda_=1e5, verbose=False):\n \"\"\"\n 扰动节点特征\n\n 参数\n -----------\n @param model: 受害者模型。\n @param x: torch.FloatTensor, 形状为[batch_size, vocab_dim]的特征向量。\n @param label: torch.LongTensor, 真实标签。\n @param steps_max: Integer, 最大的迭代次数。\n @param min_lambda_: float, 平衡对手检测器的重要性(如果存在)。\n @param max_lambda_: float, 同上。\n @param verbose: Boolean, 是否打印详细日志。\n\n 返回值\n --------\n adv_x: 扰动后的数据。\n \"\"\"\n\n # 判断输入数据是否有效\n if x is None or x.shape[0] <= 0:\n return []\n\n # 将模型设为评估模式,主要是为了禁用一些在训练模式下的特殊层,比如Dropout\n model.eval()\n\n # 获取输入数据x在当前模型下的损失和完成状态\n with torch.no_grad():\n loss, done = self.get_scores(model, x, label)\n\n # 存储当前的损失为前一次的损失\n pre_loss = loss\n\n # 获取输入数据的数量以及其他的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n\n # 初始化攻击样本为输入数据的拷贝\n adv_x = x.detach().clone()\n\n # 初始化停止标志,用于表示哪些样本已经完成了攻击\n stop_flag = torch.zeros(n, dtype=torch.bool, device=self.device)\n\n # 开始主循环,进行多次迭代以改进攻击效果\n for t in range(steps_max):\n # 计算还未完成攻击的样本数量\n num_sample_red = n - torch.sum(stop_flag)\n \n # 如果所有样本都已完成攻击,结束循环\n if num_sample_red <= 0:\n break\n\n # 获取那些还未完成攻击的样本的真实标签\n red_label = label[~stop_flag]\n pertbx = []\n\n # 对于攻击方法列表中的每种攻击方法,尝试对数据进行扰动\n for attack in self.attack_list:\n # 确保每种攻击方法都实现了perturb方法\n assert 'perturb' in type(attack).__dict__.keys()\n\n # 对于某些特定的攻击方法,在第二次及以后的迭代中取消随机化\n if t > 0 and 'use_random' in attack.__dict__.keys():\n attack.use_random = False\n\n # 对于名为\"Orthogonal\"的攻击方法,进行特殊处理\n if 'Orthogonal' in type(attack).__name__:\n pertbx.append(attack.perturb(model=model, x=adv_x[~stop_flag], label=red_label))\n else:\n pertbx.append(attack.perturb(model=model, x=adv_x[~stop_flag], label=red_label,\n min_lambda_=1e-5,\n max_lambda_=1e5,\n ))\n # 将所有攻击方法产生的扰动数据合并\n pertbx = torch.vstack(pertbx)\n\n\n # 不需要计算梯度,提高计算效率\n with torch.no_grad():\n # 将真实标签复制若干次以匹配所有的攻击列表\n red_label_ext = torch.cat([red_label] * len(self.attack_list))\n \n # 获取每种攻击方法产生的损失值和成功状态\n loss, done = self.get_scores(model, pertbx, red_label_ext)\n \n # 调整损失和成功状态的形状以方便后续计算\n loss = loss.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n done = done.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n \n # 判断哪些样本至少有一种攻击方法成功\n success_flag = torch.any(done, dim=-1)\n \n # 对于没有成功的样本,将其标记为1以进行后续处理\n done[~torch.any(done, dim=-1)] = 1\n \n # 调整损失值,对于成功的攻击方法,损失值保持不变;对于失败的,损失值变为最小值\n loss = (loss * done.to(torch.float)) + 
torch.min(loss) * (~done).to(torch.float)\n \n # 调整扰动数据的形状以方便后续计算\n pertbx = pertbx.reshape(len(self.attack_list), num_sample_red, *red_n).permute([1, 0, *red_ind])\n \n # 选择造成最大损失的扰动数据\n _, indices = loss.max(dim=-1)\n adv_x[~stop_flag] = pertbx[torch.arange(num_sample_red), indices]\n \n # 获取选中的扰动数据的损失值\n a_loss = loss[torch.arange(num_sample_red), indices]\n \n # 复制当前的停止标志\n pre_stop_flag = stop_flag.clone()\n \n # 更新停止标志,如果损失值变化很小或者某种攻击方法成功,则停止迭代\n stop_flag[~stop_flag] = (torch.abs(pre_loss[~stop_flag] - a_loss) < self.varepsilon) | success_flag\n \n # 更新前一个损失值\n pre_loss[~pre_stop_flag] = a_loss\n\n # 如果需要打印日志\n if verbose:\n # 评估最终的扰动数据的成功状态\n with torch.no_grad():\n _, done = self.get_scores(model, adv_x, label)\n # 打印攻击成功率\n logger.info(f\"max: attack effectiveness {done.sum().item() / x.size()[0] * 100}%.\")\n\n # 返回最终的扰动数据\n return adv_x\n\n\n def perturb_dae(self, predict_model, purifier, x, label=None, steps_max=5, min_lambda_=1e-5, max_lambda_=1e5, verbose=False, oblivion=False):\n \"\"\"\n 扰动节点特征\n\n 参数\n -----------\n @param model: 受害者模型。\n @param x: torch.FloatTensor, 形状为[batch_size, vocab_dim]的特征向量。\n @param label: torch.LongTensor, 真实标签。\n @param steps_max: Integer, 最大的迭代次数。\n @param min_lambda_: float, 平衡对手检测器的重要性(如果存在)。\n @param max_lambda_: float, 同上。\n @param verbose: Boolean, 是否打印详细日志。\n\n 返回值\n --------\n adv_x: 扰动后的数据。\n \"\"\"\n\n # 判断输入数据是否有效\n if x is None or x.shape[0] <= 0:\n return []\n\n # 将模型设为评估模式,主要是为了禁用一些在训练模式下的特殊层,比如Dropout\n predict_model.eval()\n purifier.eval()\n\n # 获取输入数据x在当前模型下的损失和完成状态\n with torch.no_grad():\n if not oblivion:\n purified_x = purifier(x.detach().clone().float()).to(torch.double)\n else:\n purified_x = x.detach().clone()\n loss, done = self.get_scores(predict_model, purified_x, label)\n\n # 存储当前的损失为前一次的损失\n pre_loss = loss\n\n # 获取输入数据的数量以及其他的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n\n # 初始化攻击样本为输入数据的拷贝\n adv_x = x.detach().clone()\n\n # 初始化停止标志,用于表示哪些样本已经完成了攻击\n stop_flag = torch.zeros(n, dtype=torch.bool, device=self.device)\n\n # 开始主循环,进行多次迭代以改进攻击效果\n for t in range(steps_max):\n # 计算还未完成攻击的样本数量\n num_sample_red = n - torch.sum(stop_flag)\n \n # 如果所有样本都已完成攻击,结束循环\n if num_sample_red <= 0:\n break\n\n # 获取那些还未完成攻击的样本的真实标签\n red_label = label[~stop_flag]\n pertbx = []\n\n # 对于攻击方法列表中的每种攻击方法,尝试对数据进行扰动\n for attack in self.attack_list:\n # 确保每种攻击方法都实现了perturb方法\n assert 'perturb' in type(attack).__dict__.keys()\n\n # 对于某些特定的攻击方法,在第二次及以后的迭代中取消随机化\n if t > 0 and 'use_random' in attack.__dict__.keys():\n attack.use_random = False\n\n # 对于名为\"Orthogonal\"的攻击方法,进行特殊处理\n if 'Orthogonal' in type(attack).__name__:\n pertbx.append(attack.perturb_dae(predict_model=predict_model, purifier=purifier, x=adv_x[~stop_flag], label=red_label, oblivion=oblivion))\n else:\n pertbx.append(attack.perturb_dae(model=predict_model, purifier=purifier, x=adv_x[~stop_flag], label=red_label,\n min_lambda_=1e-5,\n max_lambda_=1e5,\n oblivion=oblivion\n ))\n\n # 将所有攻击方法产生的扰动数据合并\n pertbx = torch.vstack(pertbx)\n\n\n # 不需要计算梯度,提高计算效率\n with torch.no_grad():\n # 将真实标签复制若干次以匹配所有的攻击列表\n red_label_ext = torch.cat([red_label] * len(self.attack_list))\n \n # 获取每种攻击方法产生的损失值和成功状态\n if not oblivion:\n purified_pertbx = purifier(pertbx.detach().clone().float()).to(torch.double)\n else:\n purified_pertbx = pertbx.detach().clone()\n\n loss, done = self.get_scores(predict_model, purified_pertbx, red_label_ext)\n \n # 调整损失和成功状态的形状以方便后续计算\n loss = loss.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n done = 
done.reshape(len(self.attack_list), num_sample_red).permute(1, 0)\n \n # 判断哪些样本至少有一种攻击方法成功\n success_flag = torch.any(done, dim=-1)\n \n # 对于没有成功的样本,将其标记为1以进行后续处理\n done[~torch.any(done, dim=-1)] = 1\n \n # 调整损失值,对于成功的攻击方法,损失值保持不变;对于失败的,损失值变为最小值\n loss = (loss * done.to(torch.float)) + torch.min(loss) * (~done).to(torch.float)\n \n # 调整扰动数据的形状以方便后续计算\n pertbx = pertbx.reshape(len(self.attack_list), num_sample_red, *red_n).permute([1, 0, *red_ind])\n \n # 选择造成最大损失的扰动数据\n _, indices = loss.max(dim=-1)\n adv_x[~stop_flag] = pertbx[torch.arange(num_sample_red), indices]\n \n # 获取选中的扰动数据的损失值\n a_loss = loss[torch.arange(num_sample_red), indices]\n \n # 复制当前的停止标志\n pre_stop_flag = stop_flag.clone()\n \n # 更新停止标志,如果损失值变化很小或者某种攻击方法成功,则停止迭代\n stop_flag[~stop_flag] = (torch.abs(pre_loss[~stop_flag] - a_loss) < self.varepsilon) | success_flag\n \n # 更新前一个损失值\n pre_loss[~pre_stop_flag] = a_loss\n\n # 如果需要打印日志\n if verbose:\n # 评估最终的扰动数据的成功状态\n with torch.no_grad():\n purified_adv_x = purifier(adv_x.detach().clone().float()).to(torch.double)\n _, done = self.get_scores(predict_model, purified_adv_x, label)\n # 打印攻击成功率\n logger.info(f\"max: attack effectiveness {done.sum().item() / x.size()[0] * 100}%.\")\n\n # 返回最终的扰动数据\n return adv_x\n\n\n # 这个get_scores函数的主要目的是计算扰动数据在给定模型上的损失值,并判断模型对这些扰动数据的预测是否成功完成。\n # 对于具有检测器功能的模型,还会考虑模型的额外输出来决定预测的完成状态。\n def get_scores(self, model, pertb_x, label):\n \"\"\"\n 获取扰动数据在模型上的损失值和预测标签的完成状态。\n\n 参数:\n @param model: 模型对象,即受攻击的目标模型。\n @param pertb_x: torch.Tensor,扰动后的数据。\n @param label: torch.Tensor,扰动数据的真实标签。\n\n 返回:\n - loss_no_reduction: 每个样本的损失值(无降维处理)。\n - done: Boolean Tensor,表示模型对每个样本的预测是否成功完成。\n \"\"\"\n # 判断模型是否具有检测器功能,如果有,则获取模型的两个输出:logits_f 和 prob_g。\n if hasattr(model, 'is_detector_enabled'):\n logits_f, prob_g = model.forward(pertb_x)\n else:\n # 如果模型没有检测器功能,只获取一个输出logits_f。\n logits_f = model.forward(pertb_x)\n\n # 使用交叉熵计算每个样本的损失值\n ce = F.cross_entropy(logits_f, label, reduction='none')\n\n # 获取模型的预测标签\n y_pred = logits_f.argmax(1)\n\n # 如果模型具有检测器功能且不处于\"oblivion\"模式,则进行特殊处理。\n # 使用模型的输出prob_g来判断是否成功完成了预测。\n if hasattr(model, 'is_detector_enabled') and (not self.oblivion):\n tau = model.get_tau_sample_wise(y_pred)\n loss_no_reduction = -prob_g\n done = (y_pred != label) & (prob_g <= tau)\n else:\n # 如果模型没有检测器功能或处于\"oblivion\"模式,则使用交叉熵损失来判断是否成功完成了预测。\n loss_no_reduction = ce\n done = y_pred != label\n\n return loss_no_reduction, done" }, { "identifier": "StepwiseMax", "path": "core/attack/stepwise_max.py", "snippet": "class StepwiseMax(BaseAttack):\n \"\"\"\n Stepwise max攻击方法,这是一个结合了pgd l1, pgd l2, 和 pgd linf三种攻击方式的方法。\n\n 参数\n ----------\n @param use_random: bool类型,是否使用随机的起始点。\n @param rounding_threshold: float类型,用于四舍五入实数的阈值。\n @param is_attacker: bool类型,是否扮演攻击者角色(注意:防御者执行对抗性训练)。\n @param oblivion: bool类型,是否知道敌手指示器。\n @param kappa: 攻击信心度。\n @param manipulation_x: 可操作性。\n @param omega: 与每个api相对应的互依赖api的索引。\n @param device: 设备,'cpu'或'cuda'。\n\n \"\"\"\n\n def __init__(self, use_random=False, rounding_threshold=0.5,\n is_attacker=True, oblivion=False, kappa=1., manipulation_x=None, omega=None, device=None):\n super(StepwiseMax, self).__init__(is_attacker, oblivion, kappa, manipulation_x, omega, device)\n \n # 是否使用随机起点\n self.use_random = use_random\n \n # 断言确保四舍五入阈值在(0, 1)之间\n assert 0 < rounding_threshold < 1\n \n # 设置四舍五入的阈值\n self.round_threshold = rounding_threshold\n \n # lambda_用于正则化,通常与优化的损失一起使用\n self.lambda_ = 1.\n\n def perturb_dae(self, model, purifier, x, label=None,\n steps=100,\n step_check=1,\n sl_l1=1.,\n sl_l2=1.,\n sl_linf=0.01,\n 
min_lambda_=1e-5,\n max_lambda_=1e5,\n is_score_round=True,\n base=10.,\n verbose=False,\n oblivion=False):\n \"\"\"\n 对模型进行增强攻击。\n\n @param model: PyTorch模型,待攻击目标。\n @param x: Tensor, 原始输入数据。\n @param label: Tensor或None, 输入数据对应的标签。\n @param steps: int, 攻击的总步数。\n @param step_check: int, 检查间隔,即多少步进行一次检查。\n @param sl_l1: float, L1范数的步长。\n @param sl_l2: float, L2范数的步长。\n @param sl_linf: float, Linf范数的步长。\n @param min_lambda_: float, lambda的最小值。\n @param max_lambda_: float, lambda的最大值。\n @param is_score_round: Boolean, 是否对分数进行四舍五入。\n @param base: float, 基数。\n @param verbose: Boolean, 是否输出详细信息。\n \"\"\"\n # torch.manual_seed(int(random.random() * 100)) # 设置随机种子\n # 参数校验\n assert 0 < min_lambda_ <= max_lambda_\n assert steps >= 0 and (step_check >= 1) and 1 >= sl_l1 > 0 and sl_l2 >= 0 and sl_linf >= 0\n \n model.eval() # 将模型设置为评估模式\n purifier.eval()\n \n # 根据模型是否具有某种属性来设置lambda的初值\n if hasattr(model, 'is_detector_enabled'):\n self.lambda_ = min_lambda_\n else:\n self.lambda_ = max_lambda_\n \n # 如果不是攻击者,从预定义的步骤中随机选择一个\n if not self.is_attacker:\n step_checks = [1, 10, 25, 50]\n step_check = random.choice(step_checks)\n \n # 计算每个小步骤中需要的迭代次数\n mini_steps = [step_check] * (steps // step_check)\n mini_steps = mini_steps + [steps % step_check] if steps % step_check != 0 else mini_steps\n \n # 获取输入的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n \n adv_x = x.detach().clone() # 获取输入数据的副本\n while self.lambda_ <= max_lambda_:\n pert_x_cont = None\n prev_done = None\n for i, mini_step in enumerate(mini_steps):\n with torch.no_grad():\n # 如果是第一步并且启用了随机初始化,那么获取一个随机的起始点\n if i == 0:\n adv_x = get_x0(adv_x, rounding_threshold=self.round_threshold, is_sample=True)\n # 计算损失和完成标志\n if not oblivion:\n purified_adv = purifier(adv_x.detach().clone().float()).to(torch.double)\n else:\n purified_adv = adv_x.detach().clone()\n _, done = self.get_loss(model, purified_adv, label, self.lambda_)\n \n # print(\"done:\", done)\n \n # 如果所有的都完成了,就退出循环\n if torch.all(done):\n break\n \n # 对于那些没有完成的数据,重新计算扰动\n # print(\"i:\", i)\n if i == 0:\n # print(\"~done:\", (~done))\n adv_x[~done] = x[~done]\n prev_done = done.clone()\n else:\n if (adv_x[~done]).shape[0] == (pert_x_cont[~done[~prev_done]]).shape[0]:\n adv_x[~done] = pert_x_cont[~done[~prev_done]]\n else:\n updated_mask = (~done) & (~prev_done[:len(done)])\n num_to_select = updated_mask.sum().item()\n selected_perturbations = pert_x_cont[:num_to_select]\n adv_x[updated_mask] = selected_perturbations\n\n prev_done = done.clone() \n \n # 对那些未完成的数据进行真正的扰动\n num_sample_red = torch.sum(~done).item()\n pert_x_l1, pert_x_l2, pert_x_linf = self._perturb_dae(model, purifier, adv_x[~done], label[~done],\n mini_step,\n sl_l1,\n sl_l2,\n sl_linf,\n lambda_=self.lambda_,\n oblivion=False\n )\n # print(\"pert_x_l1, pert_x_l2, pert_x_linf\", pert_x_l1, pert_x_l2, pert_x_linf)\n # 不计算梯度地执行下列操作\n with torch.no_grad():\n # 构造一个包含三种扰动的列表\n pertb_x_list = [pert_x_linf, pert_x_l2, pert_x_l1]\n n_attacks = len(pertb_x_list) # 获取攻击的数量(即3)\n pertbx = torch.vstack(pertb_x_list) # 垂直堆叠这三种扰动\n label_ext = torch.cat([label[~done]] * n_attacks) # 扩展标签列表,使其与扰动列表长度匹配\n\n # 如果不是攻击者并且不需要四舍五入得分,则获取得分\n # 否则,先对扰动进行四舍五入,再获取得分\n if not oblivion:\n purified_pertbx = purifier(pertbx.detach().clone().float()).to(torch.double)\n else:\n purified_pertbx = pertbx.detach().clone()\n if (not self.is_attacker) and (not is_score_round): \n scores, _done = self.get_scores(model, purified_pertbx, label_ext)\n else:\n scores, _done = self.get_scores(model, 
round_x(purified_pertbx, self.round_threshold), label_ext)\n \n # 如果得分的最大值大于0,则设置为该值,否则设置为0\n max_v = scores.amax() if scores.amax() > 0 else 0.\n scores[_done] += max_v # 对完成的得分增加max_v\n\n # 重新整形扰动和得分张量,以便后续操作\n pertbx = pertbx.reshape(n_attacks, num_sample_red, *red_n).permute([1, 0, *red_ind])\n scores = scores.reshape(n_attacks, num_sample_red).permute(1, 0)\n\n # 从得分张量中获取最大得分及其索引\n _2, s_idx = scores.max(dim=-1)\n # 使用索引从扰动张量中选择具有最高误导性的扰动\n pert_x_cont = pertbx[torch.arange(num_sample_red), s_idx]\n # print(\"pert_x_cont.shape\", pert_x_cont.shape)\n # 更新经过扰动的数据adv_x\n adv_x[~done] = pert_x_cont if not self.is_attacker else round_x(pert_x_cont, self.round_threshold)\n \n # 更新lambda值以便于下一次循环\n self.lambda_ *= base\n # 如果lambda值检查失败,则中断循环\n if not self.check_lambda(model):\n break\n # 如果是攻击者,对最终的扰动结果进行四舍五入\n if self.is_attacker:\n adv_x = round_x(adv_x, self.round_threshold)\n \n # 不计算梯度地获取最后的损失和完成标志\n with torch.no_grad():\n purified_adv = purifier(adv_x.detach().clone().float()).to(torch.double)\n _, done = self.get_loss(model, purified_adv, label, self.lambda_)\n # 如果设置了详细输出,打印攻击效果的百分比\n if verbose:\n logger.info(f\"step-wise max: attack effectiveness {done.sum().item() / done.size()[0] * 100:.3f}%.\")\n # 返回扰动后的数据\n return adv_x\n\n\n def perturb(self, model, x, label=None,\n steps=100,\n step_check=1,\n sl_l1=1.,\n sl_l2=1.,\n sl_linf=0.01,\n min_lambda_=1e-5,\n max_lambda_=1e5,\n is_score_round=True,\n base=10.,\n verbose=False):\n \"\"\"\n 对模型进行增强攻击。\n\n @param model: PyTorch模型,待攻击目标。\n @param x: Tensor, 原始输入数据。\n @param label: Tensor或None, 输入数据对应的标签。\n @param steps: int, 攻击的总步数。\n @param step_check: int, 检查间隔,即多少步进行一次检查。\n @param sl_l1: float, L1范数的步长。\n @param sl_l2: float, L2范数的步长。\n @param sl_linf: float, Linf范数的步长。\n @param min_lambda_: float, lambda的最小值。\n @param max_lambda_: float, lambda的最大值。\n @param is_score_round: Boolean, 是否对分数进行四舍五入。\n @param base: float, 基数。\n @param verbose: Boolean, 是否输出详细信息。\n \"\"\"\n # torch.manual_seed(int(random.random() * 100)) # 设置随机种子\n # 参数校验\n assert 0 < min_lambda_ <= max_lambda_\n assert steps >= 0 and (step_check >= 1) and 1 >= sl_l1 > 0 and sl_l2 >= 0 and sl_linf >= 0\n \n model.eval() # 将模型设置为评估模式\n \n # 根据模型是否具有某种属性来设置lambda的初值\n if hasattr(model, 'is_detector_enabled'):\n self.lambda_ = min_lambda_\n else:\n self.lambda_ = max_lambda_\n \n # 如果不是攻击者,从预定义的步骤中随机选择一个\n if not self.is_attacker:\n step_checks = [1, 10, 25, 50]\n step_check = random.choice(step_checks)\n \n # 计算每个小步骤中需要的迭代次数\n mini_steps = [step_check] * (steps // step_check)\n mini_steps = mini_steps + [steps % step_check] if steps % step_check != 0 else mini_steps\n \n # 获取输入的维度信息\n n, red_n = x.size()[0], x.size()[1:]\n red_ind = list(range(2, len(x.size()) + 1))\n \n adv_x = x.detach().clone() # 获取输入数据的副本\n while self.lambda_ <= max_lambda_:\n pert_x_cont = None\n prev_done = None\n for i, mini_step in enumerate(mini_steps):\n with torch.no_grad():\n # 如果是第一步并且启用了随机初始化,那么获取一个随机的起始点\n if i == 0:\n adv_x = get_x0(adv_x, rounding_threshold=self.round_threshold, is_sample=True)\n _, done = self.get_loss(model, adv_x, label, self.lambda_)\n \n # print(\"done:\", done)\n \n # 如果所有的都完成了,就退出循环\n if torch.all(done):\n break\n \n # 对于那些没有完成的数据,重新计算扰动\n # print(\"i:\", i)\n if i == 0:\n # print(\"~done:\", (~done))\n adv_x[~done] = x[~done]\n prev_done = done.clone()\n else:\n if (adv_x[~done]).shape[0] == (pert_x_cont[~done[~prev_done]]).shape[0]:\n adv_x[~done] = pert_x_cont[~done[~prev_done]]\n else:\n updated_mask = (~done) & (~prev_done[:len(done)])\n num_to_select = 
updated_mask.sum().item()\n selected_perturbations = pert_x_cont[:num_to_select]\n adv_x[updated_mask] = selected_perturbations\n\n prev_done = done.clone() \n \n # 对那些未完成的数据进行真正的扰动\n num_sample_red = torch.sum(~done).item()\n pert_x_l1, pert_x_l2, pert_x_linf = self._perturb(model, adv_x[~done], label[~done],\n mini_step,\n sl_l1,\n sl_l2,\n sl_linf,\n lambda_=self.lambda_\n )\n # print(\"pert_x_l1, pert_x_l2, pert_x_linf\", pert_x_l1, pert_x_l2, pert_x_linf)\n # 不计算梯度地执行下列操作\n with torch.no_grad():\n # 构造一个包含三种扰动的列表\n pertb_x_list = [pert_x_linf, pert_x_l2, pert_x_l1]\n n_attacks = len(pertb_x_list) # 获取攻击的数量(即3)\n pertbx = torch.vstack(pertb_x_list) # 垂直堆叠这三种扰动\n label_ext = torch.cat([label[~done]] * n_attacks) # 扩展标签列表,使其与扰动列表长度匹配\n\n # 如果不是攻击者并且不需要四舍五入得分,则获取得分\n # 否则,先对扰动进行四舍五入,再获取得分\n if (not self.is_attacker) and (not is_score_round):\n scores, _done = self.get_scores(model, pertbx, label_ext)\n else:\n scores, _done = self.get_scores(model, round_x(pertbx, self.round_threshold), label_ext)\n \n # 如果得分的最大值大于0,则设置为该值,否则设置为0\n max_v = scores.amax() if scores.amax() > 0 else 0.\n scores[_done] += max_v # 对完成的得分增加max_v\n\n # 重新整形扰动和得分张量,以便后续操作\n pertbx = pertbx.reshape(n_attacks, num_sample_red, *red_n).permute([1, 0, *red_ind])\n scores = scores.reshape(n_attacks, num_sample_red).permute(1, 0)\n\n # 从得分张量中获取最大得分及其索引\n _2, s_idx = scores.max(dim=-1)\n # 使用索引从扰动张量中选择具有最高误导性的扰动\n pert_x_cont = pertbx[torch.arange(num_sample_red), s_idx]\n # print(\"pert_x_cont.shape\", pert_x_cont.shape)\n # 更新经过扰动的数据adv_x\n adv_x[~done] = pert_x_cont if not self.is_attacker else round_x(pert_x_cont, self.round_threshold)\n \n # 更新lambda值以便于下一次循环\n self.lambda_ *= base\n # 如果lambda值检查失败,则中断循环\n if not self.check_lambda(model):\n break\n # 如果是攻击者,对最终的扰动结果进行四舍五入\n if self.is_attacker:\n adv_x = round_x(adv_x, self.round_threshold)\n \n # 不计算梯度地获取最后的损失和完成标志\n with torch.no_grad():\n _, done = self.get_loss(model, adv_x, label, self.lambda_)\n # 如果设置了详细输出,打印攻击效果的百分比\n if verbose:\n logger.info(f\"step-wise max: attack effectiveness {done.sum().item() / done.size()[0] * 100:.3f}%.\")\n # 返回扰动后的数据\n return adv_x\n\n def _perturb(self, model, x, label=None,\n steps=1,\n step_length_l1=1.,\n step_length_l2=0.5,\n step_length_linf=0.01,\n lambda_=1.,\n ):\n \"\"\"\n 对节点的特征向量进行扰动\n\n 参数\n -----------\n @param model: 受害者模型\n @param x: torch.FloatTensor, 节点特征向量(每个表示一个图中的API出现次数)形状为 [batch_size, vocab_dim]\n @param label: torch.LongTensor, 真实的标签\n @param steps: 整数, 迭代的最大次数\n @param step_length_l1: 每次迭代的步长,L1范数\n @param step_length_l2: 每次迭代的步长,L2范数\n @param step_length_linf: 每次迭代的步长,Linf范数\n @param lambda_: 浮点数, 惩罚因子\n \"\"\"\n if x is None or x.shape[0] <= 0:\n return []\n \n self.lambda_ = lambda_\n \n # 确保L1步长在[0,1]之间\n assert 0 <= step_length_l1 <= 1, \"期望在 [0,1] 之间的实数值,但得到 {}\".format(step_length_l1)\n model.eval()\n adv_x = x.detach()\n \n def one_iteration(_adv_x, norm_type):\n # 基于当前的扰动输入来计算梯度\n if \"rnn\" in model.model_save_path:\n model.train()\n if \"lstm\" in model.model_save_path:\n model.train() \n var_adv_x = torch.autograd.Variable(_adv_x, requires_grad=True) # 将_adv_x转换为一个可以进行自动梯度计算的变量\n loss, done = self.get_loss(model, var_adv_x, label, self.lambda_) # 获取模型在扰动输入上的损失\n grads = torch.autograd.grad(loss.mean(), var_adv_x, allow_unused=True)\n if grads[0] is None:\n grad = torch.zeros_like(var_adv_x)\n else:\n grad = grads[0].data\n\n # 寻找允许的位置来插入和移除API\n pos_insertion = (_adv_x <= 0.5) * 1 * (_adv_x >= 0.) 
# 寻找API的可插入位置:特征值在0和0.5之间\n grad4insertion = (grad > 0) * pos_insertion * grad # 根据梯度正值计算插入API的梯度\n\n pos_removal = (_adv_x > 0.5) * 1 # 寻找API的可移除位置:特征值大于0.5\n grad4removal = (grad <= 0) * (pos_removal & self.manipulation_x) * grad # 根据梯度负值计算移除API的梯度\n\n if self.is_attacker:\n # 对于攻击者,处理那些互相依赖的API\n checking_nonexist_api = (pos_removal ^ self.omega) & self.omega # 检查不存在的API\n grad4removal[:, self.api_flag] += torch.sum(grad * checking_nonexist_api, dim=-1, keepdim=True) # 考虑API之间的关系,调整移除API的梯度\n\n # 合并插入和移除的梯度\n grad = grad4removal + grad4insertion\n\n # 根据不同的范数类型,计算扰动值\n if norm_type == 'linf':\n perturbation = torch.sign(grad) # 计算梯度符号来获取无穷范数扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_linf * perturbation, min=0., max=1.) # 应用扰动并确保结果在[0,1]范围内\n\n elif norm_type == 'l2':\n l2norm = torch.linalg.norm(grad, dim=-1, keepdim=True) # 计算L2范数\n perturbation = torch.minimum(\n torch.tensor(1., dtype=_adv_x.dtype, device=_adv_x.device),\n grad / l2norm\n ) # 计算L2范数下的扰动方向\n perturbation = torch.where(torch.isnan(perturbation), 0., perturbation) # 处理NaN值\n perturbation = torch.where(torch.isinf(perturbation), 1., perturbation) # 处理Inf值\n if self.is_attacker:\n min_val = torch.amin(perturbation, dim=-1, keepdim=True).clamp_(max=0.)\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * torch.abs(min_val) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l2 * perturbation, min=0., max=1.)\n\n elif norm_type == 'l1':\n val, idx = torch.abs(grad).topk(int(1. / step_length_l1), dim=-1) # 获取梯度的绝对值的top-k值和相应的索引\n perturbation = F.one_hot(idx, num_classes=_adv_x.shape[-1]).sum(dim=1) # 根据索引计算L1范数下的扰动方向\n perturbation = torch.sign(grad) * perturbation # 使用梯度的符号来调整扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l1 * perturbation, min=0., max=1.)\n\n else:\n raise NotImplementedError # 如果范数类型不在L1、L2、Linf中,则引发异常\n\n\n # 为每种范数执行迭代\n adv_x_l1 = adv_x.clone()\n for t in range(steps):\n adv_x_l1 = one_iteration(adv_x_l1, norm_type='l1')\n \n adv_x_l2 = adv_x.clone()\n for t in range(steps):\n adv_x_l2 = one_iteration(adv_x_l2, norm_type='l2')\n \n adv_x_linf = adv_x.clone()\n for t in range(steps):\n adv_x_linf = one_iteration(adv_x_linf, norm_type='linf')\n \n return adv_x_l1, adv_x_l2, adv_x_linf\n\n\n def _perturb_dae(self, model, purifier, x, label=None,\n steps=1,\n step_length_l1=1.,\n step_length_l2=0.5,\n step_length_linf=0.01,\n lambda_=1.,\n oblivion=False):\n \"\"\"\n 对节点的特征向量进行扰动\n\n 参数\n -----------\n @param model: 受害者模型\n @param x: torch.FloatTensor, 节点特征向量(每个表示一个图中的API出现次数)形状为 [batch_size, vocab_dim]\n @param label: torch.LongTensor, 真实的标签\n @param steps: 整数, 迭代的最大次数\n @param step_length_l1: 每次迭代的步长,L1范数\n @param step_length_l2: 每次迭代的步长,L2范数\n @param step_length_linf: 每次迭代的步长,Linf范数\n @param lambda_: 浮点数, 惩罚因子\n \"\"\"\n if x is None or x.shape[0] <= 0:\n return []\n \n self.lambda_ = lambda_\n \n # 确保L1步长在[0,1]之间\n assert 0 <= step_length_l1 <= 1, \"期望在 [0,1] 之间的实数值,但得到 {}\".format(step_length_l1)\n model.eval()\n adv_x = x.detach()\n \n\n def one_iteration(_adv_x, norm_type):\n # 基于当前的扰动输入来计算梯度\n var_adv_x = torch.autograd.Variable(_adv_x, requires_grad=True) # 将_adv_x转换为一个可以进行自动梯度计算的变量\n if not oblivion:\n purified_var = 
purifier(var_adv_x.detach().clone().float()).to(torch.double)\n else:\n purified_var = var_adv_x.detach().clone()\n loss, done = self.get_loss(model, purified_var, label, self.lambda_) # 获取模型在扰动输入上的损失\n grads = torch.autograd.grad(loss.mean(), var_adv_x, allow_unused=True)\n if grads[0] is None:\n grad = torch.zeros_like(var_adv_x)\n else:\n grad = grads[0].data\n\n # 寻找允许的位置来插入和移除API\n pos_insertion = (_adv_x <= 0.5) * 1 * (_adv_x >= 0.) # 寻找API的可插入位置:特征值在0和0.5之间\n grad4insertion = (grad > 0) * pos_insertion * grad # 根据梯度正值计算插入API的梯度\n\n pos_removal = (_adv_x > 0.5) * 1 # 寻找API的可移除位置:特征值大于0.5\n grad4removal = (grad <= 0) * (pos_removal & self.manipulation_x) * grad # 根据梯度负值计算移除API的梯度\n\n if self.is_attacker:\n # 对于攻击者,处理那些互相依赖的API\n checking_nonexist_api = (pos_removal ^ self.omega) & self.omega # 检查不存在的API\n grad4removal[:, self.api_flag] += torch.sum(grad * checking_nonexist_api, dim=-1, keepdim=True) # 考虑API之间的关系,调整移除API的梯度\n\n # 合并插入和移除的梯度\n grad = grad4removal + grad4insertion\n\n # 根据不同的范数类型,计算扰动值\n if norm_type == 'linf':\n perturbation = torch.sign(grad) # 计算梯度符号来获取无穷范数扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_linf * perturbation, min=0., max=1.) # 应用扰动并确保结果在[0,1]范围内\n\n elif norm_type == 'l2':\n l2norm = torch.linalg.norm(grad, dim=-1, keepdim=True) # 计算L2范数\n perturbation = torch.minimum(\n torch.tensor(1., dtype=_adv_x.dtype, device=_adv_x.device),\n grad / l2norm\n ) # 计算L2范数下的扰动方向\n perturbation = torch.where(torch.isnan(perturbation), 0., perturbation) # 处理NaN值\n perturbation = torch.where(torch.isinf(perturbation), 1., perturbation) # 处理Inf值\n if self.is_attacker:\n min_val = torch.amin(perturbation, dim=-1, keepdim=True).clamp_(max=0.)\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * torch.abs(min_val) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l2 * perturbation, min=0., max=1.)\n\n elif norm_type == 'l1':\n val, idx = torch.abs(grad).topk(int(1. 
/ step_length_l1), dim=-1) # 获取梯度的绝对值的top-k值和相应的索引\n perturbation = F.one_hot(idx, num_classes=_adv_x.shape[-1]).sum(dim=1) # 根据索引计算L1范数下的扰动方向\n perturbation = torch.sign(grad) * perturbation # 使用梯度的符号来调整扰动方向\n if self.is_attacker:\n perturbation += (torch.any(perturbation[:, self.api_flag] < 0, dim=-1, keepdim=True) * checking_nonexist_api)\n return torch.clamp(_adv_x + step_length_l1 * perturbation, min=0., max=1.)\n\n else:\n raise NotImplementedError # 如果范数类型不在L1、L2、Linf中,则引发异常\n\n\n # 为每种范数执行迭代\n adv_x_l1 = adv_x.clone()\n for t in range(steps):\n adv_x_l1 = one_iteration(adv_x_l1, norm_type='l1')\n \n adv_x_l2 = adv_x.clone()\n for t in range(steps):\n adv_x_l2 = one_iteration(adv_x_l2, norm_type='l2')\n \n adv_x_linf = adv_x.clone()\n for t in range(steps):\n adv_x_linf = one_iteration(adv_x_linf, norm_type='linf')\n \n return adv_x_l1, adv_x_l2, adv_x_linf\n\n def get_scores(self, model, pertb_x, label):\n # 如果模型有 'is_detector_enabled' 这个属性\n if hasattr(model, 'is_detector_enabled'):\n # 获取模型的输出,logits_f 是模型的原始输出,prob_g 是一个概率值\n logits_f, prob_g = model.forward(pertb_x)\n else:\n # 如果模型没有 'is_detector_enabled' 这个属性,只获取模型的原始输出\n logits_f = model.forward(pertb_x)\n\n # 获取预测的类别\n y_pred = logits_f.argmax(1)\n \n # 计算交叉熵损失\n ce = F.cross_entropy(logits_f, label, reduction='none')\n \n # 如果模型有 'is_detector_enabled' 这个属性,并且 self.oblivion 为 False\n if hasattr(model, 'is_detector_enabled') and (not self.oblivion):\n # 获取样本的阈值\n tau = model.get_tau_sample_wise(y_pred)\n # 计算损失,加入了 prob_g 这个概率值的惩罚项\n loss_no_reduction = ce - self.lambda_ * prob_g\n # 判断预测是否错误,并且 prob_g 是否小于等于阈值 tau\n done = (y_pred != label) & (prob_g <= tau)\n else:\n # 如果没有 'is_detector_enabled' 这个属性或 self.oblivion 为 True,损失仍然是交叉熵损失\n loss_no_reduction = ce\n # 判断预测是否错误\n done = y_pred != label\n\n # 返回损失值和判断结果c\n return loss_no_reduction, done" }, { "identifier": "MalwareDetectionDNN", "path": "core/defense/md_dnn.py", "snippet": "class MalwareDetectionDNN(nn.Module):\n def __init__(self, input_size, n_classes, device='cpu', name='DNN', **kwargs):\n \"\"\"\n 初始化恶意软件检测器\n\n 参数:\n ----------\n @param input_size: 整数,输入向量的维度数量。\n @param n_classes: 整数,表示分类的数量,例如二分类问题中n=2。\n @param device: 字符串,可以是'cpu'或'cuda',表示模型应该在CPU还是GPU上运行。\n @param name: 字符串,用于命名模型。\n \"\"\"\n super(MalwareDetectionDNN, self).__init__() # 调用父类初始化\n self.input_size = input_size # 定义输入尺寸\n self.n_classes = n_classes # 定义分类数量\n self.device = device # 定义运行设备\n self.name = name # 定义模型名称\n\n self.parse_args(**kwargs) # 解析额外参数\n\n self.dense_layers = [] # 初始化一个空的密集层列表\n \n # 检查是否至少有一个隐藏层\n if len(self.dense_hidden_units) >= 1:\n # 添加第一个密集层\n self.dense_layers.append(nn.Linear(self.input_size, self.dense_hidden_units[0]))\n else:\n # 如果没有隐藏层,抛出异常\n raise ValueError(\"Expect at least one hidden layer.\")\n\n # 为每一对连续的隐藏单元添加一个密集层\n for i in range(len(self.dense_hidden_units[0:-1])):\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[i], \n self.dense_hidden_units[i + 1]))\n \n # 添加最后一个连接到输出层的密集层\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[-1], self.n_classes))\n \n # 将密集层添加到模型中以进行跟踪\n for idx_i, dense_layer in enumerate(self.dense_layers):\n self.add_module('nn_model_layer_{}'.format(idx_i), dense_layer)\n\n # 根据参数选择使用SELU或ReLU激活函数\n if self.smooth:\n self.activation_func = F.selu # 使用SELU激活函数\n else:\n self.activation_func = F.relu # 使用ReLU激活函数\n\n # 定义模型的保存路径\n self.model_save_path = path.join(config.get('experiments', 'md_dnn') + '_' + self.name,\n 'model.pth')\n \n # 日志中打印模型的结构信息\n 
logger.info('========================================dnn model architecture===============================')\n logger.info(self)\n logger.info('===============================================end==========================================')\n\n\n def parse_args(self,\n dense_hidden_units=None,\n dropout=0.6,\n alpha_=0.2,\n smooth=False,\n **kwargs\n ):\n \"\"\"\n 解析并设置网络的超参数。\n\n 参数:\n ----------\n dense_hidden_units : list, 可选\n 网络中每个隐藏层的单元数。如果没有指定,则默认为两个隐藏层,每层200个单元。\n dropout : float, 可选\n dropout正则化的比率,默认为0.6。\n alpha_ : float, 可选\n 某些激活函数的参数,默认为0.2。\n smooth : bool, 可选\n 是否使用平滑的激活函数,默认为False。\n **kwargs : dict\n 其他超参数。\n \"\"\"\n\n # 如果用户没有指定隐藏层,使用默认的配置\n if dense_hidden_units is None:\n self.dense_hidden_units = [200, 200]\n # 如果用户指定了一个列表,使用它\n elif isinstance(dense_hidden_units, list):\n self.dense_hidden_units = dense_hidden_units\n # 否则抛出一个异常\n else:\n raise TypeError(\"Expect a list of hidden units.\")\n\n # 设置dropout, alpha和smooth参数\n self.dropout = dropout\n self.alpha_ = alpha_\n self.smooth = smooth\n\n # 从kwargs中获取并设置proc_number\n self.proc_number = kwargs.get('proc_number', None) # 如果不存在,则返回None\n\n # 如果还有其他参数,记录警告,因为这些参数可能是未知的\n if len(kwargs) > 0:\n logger.warning(\"Unknown hyper-parameters {}\".format(str(kwargs)))\n\n\n def forward(self, x):\n \"\"\"\n 使输入数据 x 通过神经网络\n \n 参数\n ----------\n @param x: 2D张量,特征表示\n \"\"\"\n # 遍历神经网络的每一层,除了最后一层\n for dense_layer in self.dense_layers[:-1]:\n x = self.activation_func(dense_layer(x)) # 使用激活函数处理每一层的输出\n\n # 对处理过的数据进行 dropout 操作,用于防止过拟合\n latent_representation = F.dropout(x, self.dropout, training=self.training)\n \n # 用最后一层进行处理,得到logits(未归一化的预测或分类得分)\n logits = self.dense_layers[-1](latent_representation)\n return logits\n\n def inference(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n\n # 将所有批次的置信度垂直堆叠成一个张量\n confidences = torch.vstack(confidences)\n # 将所有批次的真实标签连接成一个张量\n gt_labels = torch.cat(gt_labels, dim=0)\n \n return confidences, gt_labels\n\n def inference_dae(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n \n return confidences, gt_labels\n\n\n def get_important_attributes(self, test_data_producer, target_label=1):\n \"\"\"\n 使用集成梯度(Integrated Gradients)方法获取重要的属性/特征\n\n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n @param target_label: 目标标签,默认为1\n \n 返回值\n ----------\n 
返回重要的属性/特征\n \"\"\"\n attributions = [] # 存储属性或特征的重要性得分\n gt_labels = [] # 存储真实标签\n\n # 定义一个使用集成梯度方法的包装器\n def _ig_wrapper(_x):\n logits = self.forward(_x)\n return F.softmax(logits, dim=-1)\n\n # 初始化集成梯度对象\n ig = IntegratedGradients(_ig_wrapper)\n\n # 遍历测试数据集\n for i, (x, y) in enumerate(test_data_producer):\n # 将数据和标签转移到指定的设备上\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 使x能够计算梯度\n x.requires_grad = True\n # 定义基线,用于集成梯度的计算\n baseline = torch.zeros_like(x, dtype=torch.double, device=self.device)\n # 计算属性的重要性\n attribution_bs = ig.attribute(x,\n baselines=baseline,\n target=target_label)\n # 将所有批次的属性垂直堆叠\n attribution = torch.hstack(attribution_bs)\n # 保存得到的属性重要性得分和真实标签\n attributions.append(attribution.clone().detach().cpu().numpy())\n gt_labels.append(y.clone().detach().cpu().numpy())\n # 将真实标签保存为.npy文件\n np.save('./labels', np.concatenate(gt_labels))\n \n return np.vstack(attributions)\n\n\n def inference_batch_wise(self, x):\n \"\"\"\n 仅支持恶意软件样本的批量推理\n \n 参数\n ----------\n @param x: 输入数据的张量\n \n 返回值\n ----------\n 返回推理的置信度和标签\n \"\"\"\n # 确保输入是一个张量\n assert isinstance(x, torch.Tensor)\n \n # 获得模型的输出\n logit = self.forward(x)\n \n # 返回每个样本的置信度和一个与logit形状相同的全1数组(表示恶意软件样本)\n return torch.softmax(logit, dim=-1).detach().cpu().numpy(), np.ones((logit.size()[0],))\n\n\n def predict(self, test_data_producer, indicator_masking=True):\n \"\"\"\n 预测标签并进行评估\n\n 参数\n --------\n @param test_data_producer: torch.DataLoader, 用于生成测试数据的数据加载器\n \"\"\"\n # 进行评估\n confidence, y_true = self.inference(test_data_producer)\n y_pred = confidence.argmax(1).cpu().numpy() # 预测标签\n y_true = y_true.cpu().numpy() # 真实标签\n \n # print(\"y_true.shape:\", y_true.shape)\n # print(\"y_pred.shape:\", y_pred.shape)\n \n # 使用sklearn的评估指标进行评估\n from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score\n accuracy = accuracy_score(y_true, y_pred)\n b_accuracy = balanced_accuracy_score(y_true, y_pred)\n \n MSG = \"The accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(accuracy * 100))\n \n MSG = \"The balanced accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(b_accuracy * 100))\n\n # 检查数据中是否存在缺失的类别\n if np.any([np.all(y_true == i) for i in range(self.n_classes)]):\n logger.warning(\"class absent.\")\n return\n\n # 计算混淆矩阵\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n fpr = fp / float(tn + fp) # 计算假阳性率\n fnr = fn / float(tp + fn) # 计算假阴性率\n f1 = f1_score(y_true, y_pred, average='binary') # 计算F1分数\n\n print(\"Other evaluation metrics we may need:\")\n MSG = \"False Negative Rate (FNR) is {:.5f}%、False Positive Rate (FPR) is {:.5f}%, F1 score is {:.5f}%\"\n logger.info(MSG.format(fnr * 100, fpr * 100, f1 * 100))\n\n\n def customize_loss(self, logits, gt_labels, representation=None, mini_batch_idx=None):\n \"\"\"\n 自定义损失函数\n\n 参数\n --------\n @param logits: Tensor, 模型的输出\n @param gt_labels: Tensor, 真实的标签\n @param representation: Tensor, 可选参数,表示特征表示\n @param mini_batch_idx: Int, 可选参数,表示小批次的索引\n \n 返回值\n --------\n 返回交叉熵损失\n \"\"\"\n return F.cross_entropy(logits, gt_labels)\n\n\n def fit(self, train_data_producer, validation_data_producer, epochs=100, lr=0.005, weight_decay=0., weight_sampling=0.5, verbose=True):\n \"\"\"\n 训练恶意软件检测器,根据验证集上的交叉熵损失选择最佳模型。\n\n 参数\n ----------\n @param train_data_producer: 对象, 用于生成一批训练数据的迭代器\n @param validation_data_producer: 对象, 用于生成验证数据的迭代器\n @param epochs: 整数, 训练的周期数\n @param lr: 浮点数, Adam优化器的学习率\n @param weight_decay: 浮点数, 惩罚因子\n @param verbose: 布尔值, 是否显示详细的日志\n \"\"\"\n # 初始化优化器\n optimizer = 
optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)\n best_avg_acc = 0. # 记录验证集上的最佳准确率\n best_epoch = 0 # 记录最佳准确率对应的周期\n total_time = 0. # 总的训练时间\n\n # 获取训练数据批次的数量\n nbatches = len(train_data_producer)\n \n # 进行指定次数的训练周期\n for i in range(epochs):\n # 设置模型为训练模式\n self.train()\n # 初始化列表用于保存每批数据的损失值和准确率\n losses, accuracies = [], []\n\n # 对每个训练数据批次进行遍历\n for idx_batch, (x_train, y_train) in enumerate(train_data_producer):\n # 将数据转移到指定的计算设备(例如GPU或CPU)\n x_train, y_train = utils.to_device(x_train.double(), y_train.long(), self.device)\n\n # 记录开始训练的时间\n start_time = time.time()\n\n # 清空之前累积的梯度\n optimizer.zero_grad() \n \n # 对输入数据进行前向传播\n logits = self.forward(x_train) \n \n # 根据模型的输出和真实标签计算损失\n loss_train = self.customize_loss(logits, y_train) \n\n # 对损失进行反向传播\n loss_train.backward()\n \n # 使用优化器更新模型参数\n optimizer.step()\n\n # 计算训练这批数据所花费的总时间\n total_time += time.time() - start_time\n \n # 计算这批数据上的准确率\n acc_train = (logits.argmax(1) == y_train).sum().item() / x_train.size()[0]\n \n # 将时间转换为分钟和秒\n mins, secs = int(total_time / 60), int(total_time % 60)\n \n # 将这批数据的损失和准确率加入到列表中\n losses.append(loss_train.item())\n accuracies.append(acc_train)\n\n # 如果开启了详细输出模式,显示当前训练进度和这批数据上的损失和准确率\n if verbose:\n logger.info(f'小批次: {i * nbatches + idx_batch + 1}/{epochs * nbatches} | 训练时间为 {mins:.0f} 分钟, {secs} 秒。')\n logger.info(f'训练损失(小批次级别): {losses[-1]:.4f} | 训练精度: {acc_train * 100:.2f}')\n\n\n self.eval() # 将模型设置为评估模式\n avg_acc_val = []\n\n with torch.no_grad(): # 确保在评估模式下不进行梯度的计算\n for x_val, y_val in validation_data_producer:\n # 将数据移动到指定设备(例如GPU或CPU)上,并确保数据的类型为双精度浮点数和长整型\n x_val, y_val = utils.to_device(x_val.double(), y_val.long(), self.device)\n \n # 使用模型进行前向传播,得到输出结果\n logits = self.forward(x_val)\n \n # 计算验证数据上的准确率\n acc_val = (logits.argmax(1) == y_val).sum().item() / x_val.size()[0]\n \n # 保存每一批验证数据的准确率\n avg_acc_val.append(acc_val)\n \n # 计算所有验证数据的平均准确率\n avg_acc_val = np.mean(avg_acc_val)\n\n # 如果当前周期的验证精度超过之前的最佳验证精度\n if avg_acc_val >= best_avg_acc:\n # 更新最佳验证精度\n best_avg_acc = avg_acc_val\n best_epoch = i\n \n # 检查模型保存路径是否存在,如果不存在,则创建\n if not path.exists(self.model_save_path):\n utils.mkdir(path.dirname(self.model_save_path))\n \n # 保存当前的模型参数\n torch.save(self.state_dict(), self.model_save_path)\n \n # 如果开启了详细输出模式,显示模型保存路径\n if verbose:\n print(f'模型保存在路径: {self.model_save_path}')\n\n # 如果开启了详细输出模式,显示训练损失、训练精度、验证精度和最佳验证精度\n if verbose:\n logger.info(f'训练损失(周期级别): {np.mean(losses):.4f} | 训练精度: {np.mean(accuracies) * 100:.2f}')\n logger.info(f'验证精度: {avg_acc_val * 100:.2f} | 最佳验证精度: {best_avg_acc * 100:.2f} 在第 {best_epoch} 个周期')\n\n def load(self):\n \"\"\"\n 从磁盘加载模型参数\n \"\"\"\n self.load_state_dict(torch.load(self.model_save_path))" }, { "identifier": "DetectorTemplate", "path": "core/defense/amd_template.py", "snippet": "class DetectorTemplate(object):\n def __init__(self):\n self.tau = None # 阈值变量\n self.is_detector_enabled = True # 表示检测器是否启用的标志\n\n def forward(self, x):\n \"\"\"\n 类预测与密度估计\n \"\"\"\n raise NotImplementedError\n\n def get_threshold(self):\n \"\"\"\n 计算拒绝异常值的阈值\n \"\"\"\n raise NotImplementedError\n\n def get_tau_sample_wise(self):\n \"\"\"\n 获取每个样本的tau值\n \"\"\"\n raise NotImplementedError\n\n def indicator(self):\n \"\"\"\n 返回一个布尔标志向量,指示是否拒绝一个样本\n \"\"\"\n raise NotImplementedError" }, { "identifier": "config", "path": "config.py", "snippet": "def parser_config():" }, { "identifier": "utils", "path": "tools/utils.py", "snippet": "ENC_KEY = 'cab228a122d3486bac7fab148e8b5aba'\n MSG = \"No such directory or file {} exists!\".format(sample_dir)\n MSG = \"A 
directory or a list of paths are allowed!\"\ndef pool_initializer():\ndef retrive_files_set(base_dir, dir_ext, file_ext):\n def get_file_name(root_dir, file_ext):\ndef check_dir(sample_dir):\ndef dump_joblib(data, path):\ndef read_joblib(path):\ndef load_json(json_path):\ndef dump_json(obj_dict, file_path):\ndef dump_pickle(data, path, use_gzip=False):\ndef read_pickle(path, use_gzip=False):\ndef dump_pickle_frd_space(data, path):\ndef read_pickle_frd_space(path):\ndef dump_list_of_lists(data, path):\ndef read_list_of_lists(path):\ndef mkdir(target):\ndef read_txt(path, mode='r'):\ndef dump_txt(data_str, path, mode='w'):\ndef read_file_by_fileinput(file_path, inplace=True):\n def __init__(self, manager, use_cache=True):\n def is_cached(self, key):\n def reset(self):\n def get(self, key):\n def cache(self, key, img, lbl):\ndef build_kwargs(keys, arg_dict):\ndef inverse_kwargs(vars):\ndef save_args(fout, args):\ndef load_args(fout):\ndef get_group_args(args, args_parser, title):\ndef tensor_coo_sp_to_ivs(sparse_tensor):\ndef ivs_to_tensor_coo_sp(ivs, device='cpu'):\ndef sp_to_symmetric_sp(sparse_mx):\ndef sparse_mx_to_torch_sparse_tensor(sparse_mx):\ndef to_tensor(feature_x=None, labels=None, device='cpu'):\n def _to_torch_tensor(mat):\ndef to_device(feature_x=None, labels=None, device='cpu'):\ndef psn(x_tensor, prob, lower_value=0., upper_value=1.):\n def __init__(self):\n def __call__(self, module):\ndef round_x(x, alpha=0.5):\ndef get_x0(x, rounding_threshold=0.5, is_sample=False):\ndef or_tensors(x_1, x_2):\ndef xor_tensors(x_1, x_2):\ndef get_mal_data(x_batch, y_batch):\ndef get_mal_ben_data(x_batch, y_batch):\ndef java_class_name2smali_name(cls):\ndef remove_duplicate(components):\ndef crypt_identifier(idf, seed=2345):\n def md5_transform():\ndef random_string(code):\n def sha1_transform():\ndef string_on_code(code):\n def md5_transform():\ndef random_name(seed=2345, code='abc'):\ndef apply_encryption(base_string):\ndef get_sha256(file_path):\nclass SimplifyClass:\nclass NonnegWeightConstraint(object):" } ]
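The attribution helper in the snippets above wraps the detector's softmax output and passes it to IntegratedGradients together with an all-zero baseline and a target class (target_label defaults to 1). A minimal standalone sketch of that pattern, assuming the IntegratedGradients referenced there is Captum's implementation (the snippet does not show the import) and using an invented toy two-class model, so every name and shape below is illustrative rather than part of this record:

# Sketch only: the toy model, input shapes, and values are invented for illustration.
import torch
import torch.nn as nn
import torch.nn.functional as F
from captum.attr import IntegratedGradients  # assumed source of IntegratedGradients

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2)).double()
model.eval()

def ig_wrapper(x):
    # Attribute the softmax probabilities rather than the raw logits,
    # mirroring the _ig_wrapper defined in the snippet above.
    return F.softmax(model(x), dim=-1)

ig = IntegratedGradients(ig_wrapper)
x = torch.rand(4, 8, dtype=torch.double, requires_grad=True)
baseline = torch.zeros_like(x)  # all-zero reference input, as in the snippet
attributions = ig.attribute(x, baselines=baseline, target=1)  # explain class 1
print(attributions.shape)  # torch.Size([4, 8]), same shape as the input batch

The target argument selects which output class the attributions explain, which is what the snippet's target_label=1 default corresponds to.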
import time
import os.path as path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from core.attack.max import Max
from core.attack.stepwise_max import StepwiseMax
from core.defense.md_dnn import MalwareDetectionDNN
from core.defense.amd_template import DetectorTemplate
from config import config, logging, ErrorHandler
from tools import utils
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score
19,741
""" @article{grosse2017statistical, title={On the (statistical) detection of adversarial examples}, author={Grosse, Kathrin and Manoharan, Praveen and Papernot, Nicolas and Backes, Michael and McDaniel, Patrick}, journal={arXiv preprint arXiv:1702.06280}, year={2017} } @inproceedings{carlini2017adversarial, title={Adversarial examples are not easily detected: Bypassing ten detection methods}, author={Carlini, Nicholas and Wagner, David}, booktitle={Proceedings of the 10th ACM workshop on artificial intelligence and security}, pages={3--14}, year={2017} } This implementation is not an official version, but adapted from: https://github.com/carlini/nn_breaking_detection """ from __future__ import absolute_import from __future__ import division from __future__ import print_function
""" @article{grosse2017statistical, title={On the (statistical) detection of adversarial examples}, author={Grosse, Kathrin and Manoharan, Praveen and Papernot, Nicolas and Backes, Michael and McDaniel, Patrick}, journal={arXiv preprint arXiv:1702.06280}, year={2017} } @inproceedings{carlini2017adversarial, title={Adversarial examples are not easily detected: Bypassing ten detection methods}, author={Carlini, Nicholas and Wagner, David}, booktitle={Proceedings of the 10th ACM workshop on artificial intelligence and security}, pages={3--14}, year={2017} } This implementation is not an official version, but adapted from: https://github.com/carlini/nn_breaking_detection """ from __future__ import absolute_import from __future__ import division from __future__ import print_function
logger = logging.getLogger('core.defense.amd_dnn_plus')
4
2023-11-27 02:00:23+00:00
24k
iann838/pulsefire
tests/test_taskgroups.py
[ { "identifier": "RiotAPIClient", "path": "pulsefire/clients.py", "snippet": "class RiotAPIClient(BaseClient):\n \"\"\"Riot API Client.\n\n | Resources | Support |\n | -------------------- | -------------------------- |\n | League of Legends | ✅ |\n | Legends of Runeterra | ✅ |\n | Teamfight Tactics | ✅ |\n | Valorant | ✅ |\n\n Example:\n ```python\n async with RiotAPIClient(\n default_headers={\"X-Riot-Token\": <API_KEY>}\n ) as client:\n summoner = await client.get_lol_summoner_v4_by_name(region=\"na1\", name=\"Not a Whale\")\n assert summoner[\"summonerLevel\"] > 200\n ```\n \"\"\"\n\n Region = Literal[\n \"americas\", \"europe\", \"asia\", \"sea\", \"esports\",\n \"br1\", \"eun1\", \"euw1\", \"jp1\", \"kr\", \"la1\", \"la2\",\n \"na1\", \"oc1\", \"tr1\", \"ru\", \"ph2\", \"sg2\", \"th2\", \"tw2\", \"vn2\",\n \"ap\", \"br\", \"eu\", \"kr\", \"latam\", \"na\",\n ] | _str\n\n def __init__(\n self,\n *,\n base_url: str = \"https://{region}.api.riotgames.com\",\n default_params: dict[str, Any] = {},\n default_headers: dict[str, str] = {\"X-Riot-Token\": \"\"},\n default_queries: dict[str, str] = {},\n middlewares: list[Middleware] = [\n json_response_middleware(),\n http_error_middleware(),\n rate_limiter_middleware(RiotAPIRateLimiter()),\n ],\n ) -> None:\n super().__init__(\n base_url=base_url,\n default_params=default_params,\n default_headers=default_headers,\n default_queries=default_queries,\n middlewares=middlewares\n )\n\n # Account Endpoints\n\n async def get_account_v1_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/by-puuid/{puuid}\")\n\n async def get_account_v1_by_riot_id(self, *, region: Region = ..., game_name: str = ..., tag_line: str = ...) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/by-riot-id/{game_name}/{tag_line}\")\n\n async def get_account_v1_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/me\")\n\n async def get_account_v1_active_shard_by_puuid(self, *, region: Region = ..., puuid: str = ..., game: str = ...) -> RiotAPISchema.AccountV1ActiveShard:\n return await self.invoke(\"GET\", \"/riot/account/v1/active-shards/by-game/{game}/by-puuid/{puuid}\")\n\n # League of Legends Endpoints\n\n async def get_lol_champion_v3_rotation(self, *, region: Region = ...) -> RiotAPISchema.LolChampionV3Rotation:\n return await self.invoke(\"GET\", \"/lol/platform/v3/champion-rotations\")\n\n async def get_lol_champion_v4_mastery_by_summoner(self, *, region: Region = ..., summoner_id: str = ..., champion_id: int = ...) -> RiotAPISchema.LolChampionV4Mastery:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}/by-champion/{champion_id}\")\n\n async def get_lol_champion_v4_masteries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}\")\n\n async def get_lol_champion_v4_top_masteries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) 
-> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}/top\")\n\n async def get_lol_champion_v4_mastery_score_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> int:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/scores/by-summoner/{summoner_id}\")\n\n async def get_lol_champion_v4_mastery_by_puuid(self, *, region: Region = ..., puuid: str = ..., champion_id: int = ...) -> RiotAPISchema.LolChampionV4Mastery:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}/by-champion/{champion_id}\")\n\n async def get_lol_champion_v4_masteries_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}\")\n\n async def get_lol_champion_v4_top_masteries_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}/top\")\n\n async def get_lol_champion_v4_mastery_score_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> int:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/scores/by-puuid/{puuid}\")\n\n async def get_lol_clash_v1_players_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolClashV1Player]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/players/by-summoner/{summoner_id}\")\n\n async def get_lol_clash_v1_players_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolClashV1Player]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/players/by-puuid/{puuid}\")\n\n async def get_lol_clash_v1_team(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolClashV1Team:\n return await self.invoke(\"GET\", \"/lol/clash/v1/teams/{id}\")\n\n async def get_lol_clash_v1_tournament_by_team(self, *, region: Region = ..., team_id: str = ...) -> RiotAPISchema.LolClashV1Tournament:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments/by-team/{team_id}\")\n\n async def get_lol_clash_v1_tournament(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolClashV1Tournament:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments/{id}\")\n\n async def get_lol_clash_v1_tournaments(self, *, region: Region = ...) -> list[RiotAPISchema.LolClashV1Tournament]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments\")\n\n async def get_lol_league_v4_entries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolLeagueV4LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/lol/league/v4/entries/by-summoner/{summoner_id}\")\n\n async def get_lol_league_v4_challenger_league_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/challengerleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_grandmaster_league_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/grandmasterleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_master_league_by_queue(self, *, region: Region = ..., queue: str = ...) 
-> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/masterleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_entries_by_division(\n self, *, region: Region = ..., queue: str = ..., tier: str = ..., division: str = ..., queries: dict = {\"page\": 1}\n ) -> list[RiotAPISchema.LolLeagueV4LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/lol/league/v4/entries/{queue}/{tier}/{division}\")\n\n async def get_lol_league_v4_league(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/leagues/{id}\")\n\n async def get_lol_match_v5_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolMatchV5Match:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/{id}\")\n\n async def get_lol_match_v5_match_timeline(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolMatchV5MatchTimeline:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/{id}/timeline\")\n\n async def get_lol_match_v5_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ..., queries: dict = {\"start\": 0, \"count\": 100}) -> list[str]:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/by-puuid/{puuid}/ids\")\n\n async def get_lol_spectator_v4_active_game_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> RiotAPISchema.LolSpectatorV4Game:\n return await self.invoke(\"GET\", \"/lol/spectator/v4/active-games/by-summoner/{summoner_id}\")\n\n async def get_lol_spectator_v4_featured_games(self, *, region: Region = ...) -> RiotAPISchema.LolSpectatorV4GameList:\n return await self.invoke(\"GET\", \"/lol/spectator/v4/featured-games\")\n\n async def get_lol_status_v4_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/lol/status/v4/platform-data\")\n\n async def get_lol_summoner_v4_by_id(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/{id}\")\n\n async def get_lol_summoner_v4_by_name(self, *, region: Region = ..., name: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/by-name/{name}\")\n\n async def get_lol_summoner_v4_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/by-puuid/{puuid}\")\n\n async def get_lol_summoner_v4_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/me\")\n\n async def get_lol_summoner_v4_by_rso_puuid(self, *, region: Region = ..., rso_puuid: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/fulfillment/v1/summoners/by-puuid/{rso_puuid}\")\n\n # Teamfight Tactics Endpoints\n\n async def get_tft_league_v1_entries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) 
-> list[RiotAPISchema.TftLeagueV1LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/tft/league/v1/entries/by-summoner/{summoner_id}\")\n\n async def get_tft_league_v1_challenger_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/challenger\")\n\n async def get_tft_league_v1_grandmaster_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/grandmaster\")\n\n async def get_tft_league_v1_master_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/master\")\n\n async def get_tft_league_v1_entries_by_division(\n self, *, region: Region = ..., tier: str = ..., division: str = ..., queries: dict = {\"page\": 1}\n ) -> list[RiotAPISchema.TftLeagueV1LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/tft/league/v1/entries/{tier}/{division}\")\n\n async def get_tft_league_v1_league(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/leagues/{id}\")\n\n async def get_tft_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftMatchV1Match:\n return await self.invoke(\"GET\", \"/tft/match/v1/matches/{id}\")\n\n async def get_tft_match_v1_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ..., queries: dict = {\"start\": 0, \"count\": 100}) -> list[str]:\n return await self.invoke(\"GET\", \"/tft/match/v1/matches/by-puuid/{puuid}/ids\")\n\n async def get_tft_status_v1_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/tft/status/v1/platform-data\")\n\n async def get_tft_summoner_v1_by_id(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/{id}\")\n\n async def get_tft_summoner_v1_by_name(self, *, region: Region = ..., name: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/by-name/{name}\")\n\n async def get_tft_summoner_v1_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/by-puuid/{puuid}\")\n\n async def get_tft_summoner_v1_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/me\")\n\n # Legends of Runeterra Endpoints\n\n async def get_lor_ranked_v1_leaderboard(self, *, region: Region = ...) -> RiotAPISchema.LorRankedV1Leaderboard:\n return await self.invoke(\"GET\", \"/lor/ranked/v1/leaderboards\")\n\n async def get_lor_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LorMatchV1Match:\n return await self.invoke(\"GET\", \"/lor/match/v1/matches/{id}\")\n\n async def get_lor_match_v1_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[str]:\n return await self.invoke(\"GET\", \"/lor/match/v1/matches/by-puuid/{puuid}/ids\")\n\n async def get_lor_status_v1_platform_data(self, *, region: Region = ...) 
-> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/lor/status/v1/platform-data\")\n\n # Valorant Endpoints\n\n async def get_val_content_v1_contents(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.ValContentV1Contents:\n return await self.invoke(\"GET\", \"/val/content/v1/contents\")\n\n async def get_val_ranked_v1_leaderboard_by_act(self, *, region: Region = ..., act_id: str = ...) -> RiotAPISchema.ValRankedV1Leaderboard:\n return await self.invoke(\"GET\", \"/val/ranked/v1/leaderboards/by-act/{act_id}\")\n\n async def get_val_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.ValMatchV1Match:\n return await self.invoke(\"GET\", \"/val/match/v1/matches/{id}\")\n\n async def get_val_match_v1_matchlist_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.ValMatchV1Matchlist:\n return await self.invoke(\"GET\", \"/val/match/v1/matchlists/by-puuid/{puuid}\")\n\n async def get_val_match_v1_recent_matches_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.ValMatchV1RecentMatches:\n return await self.invoke(\"GET\", \"/val/match/v1/recent-matches/by-queue/{queue}\")\n\n async def get_val_status_v1_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/val/status/v1/platform-data\")" }, { "identifier": "async_to_sync", "path": "pulsefire/functools.py", "snippet": "def async_to_sync(runner: Callable[[Awaitable[Any]], Any] = asyncio.run):\n \"\"\"Convert a coroutine function to run synchronously. Use as decorator `@async_to_sync()`.\n\n Example:\n ```python\n @async_to_sync()\n async def sample_func(number: int):\n ...\n \n sample_func(0)\n ```\n\n Parameters:\n runner: A callable that runs the awaitable synchronously.\n\n Raises:\n TypeError: When `func` is not a coroutine function.\n \"\"\"\n\n def decorator[**P, R](func: Callable[P, Awaitable[R]]) -> Callable[P, R]:\n if not inspect.iscoroutinefunction(func):\n raise TypeError(f\"{func} is not a coroutine function\")\n\n @functools.wraps(func)\n def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:\n return runner(func(*args, **kwargs))\n\n return wrapper\n\n return decorator" }, { "identifier": "RiotAPISchema", "path": "pulsefire/schemas.py", "snippet": "class RiotAPISchema:\n\n # Account Types\n\n AccountV1Account = TypedDict(\"AccountV1Account\", {\n \"puuid\": str,\n \"gameName\": str,\n \"tagLine\": str,\n })\n AccountV1ActiveShard = TypedDict(\"AccountV1ActiveShard\", {\n \"puuid\": str,\n \"game\": str,\n \"activeShard\": str,\n })\n\n # League of Legends Types\n\n LolChampionV3Rotation = TypedDict(\"LolChampionV3Rotation\", {\n \"freeChampionIds\": list[int],\n \"freeChampionIdsForNewPlayers\": list[int],\n \"maxNewPlayerLevel\": int\n })\n LolChampionV4Mastery = TypedDict(\"LolChampionV4Mastery\", {\n \"puuid\": str,\n \"championId\": int,\n \"championLevel\": int,\n \"championPoints\": int,\n \"lastPlayTime\": int,\n \"championPointsSinceLastLevel\": int,\n \"championPointsUntilNextLevel\": int,\n \"chestGranted\": bool,\n \"tokensEarned\": int,\n \"summonerId\": str\n })\n LolClashV1Player = TypedDict(\"LolClashV1Player\", {\n \"summonerId\": str,\n \"teamId\": str,\n \"position\": str,\n \"role\": str,\n })\n LolClashV1Team = TypedDict(\"LolClashV1Team\", {\n \"id\": str,\n \"tournamentId\": int,\n \"name\": str,\n \"iconId\": int,\n \"tier\": int,\n \"captain\": str,\n \"abbreviation\": str,\n \"players\": list[LolClashV1Player],\n })\n 
LolClashV1TournamentSchedule = TypedDict(\"LolClashV1TournamentSchedule\", {\n \"id\": int,\n \"registrationTime\": int,\n \"startTime\": int,\n \"cancelled\": bool,\n })\n LolClashV1Tournament = TypedDict(\"LolClashV1Tournament\", {\n \"id\": int,\n \"themeId\": int,\n \"nameKey\": str,\n \"nameKeySecondary\": str,\n \"schedule\": list[LolClashV1TournamentSchedule]\n })\n LolLeagueV4LeagueEntryMiniSeries = TypedDict(\"LolLeagueV4LeagueEntryMiniSeries\", {\n \"losses\": int,\n \"progress\": str,\n \"target\": int,\n \"wins\": int,\n })\n LolLeagueV4LeagueEntry = TypedDict(\"LolLeagueV4LeagueEntry\", {\n \"summonerId\": str,\n \"summonerName\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n })\n LolLeagueV4LeagueFullEntry = TypedDict(\"LolLeagueV4LeagueFullEntry\", {\n \"leagueId\": str,\n \"summonerId\": str,\n \"summonerName\": str,\n \"queueType\": str,\n \"tier\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n \"miniSeries\": NotRequired[LolLeagueV4LeagueEntryMiniSeries],\n })\n LolLeagueV4League = TypedDict(\"LolLeagueV4League\", {\n \"tier\": str,\n \"leagueId\": str,\n \"queue\": str,\n \"name\": str,\n \"entries\": list[LolLeagueV4LeagueEntry]\n })\n LolMatchV5MatchMetadata = TypedDict(\"LolMatchV5MatchMetadata\", {\n \"dataVersion\": str,\n \"matchId\": str,\n \"participants\": list[str]\n })\n LolMatchV5MatchTeamObjective = TypedDict(\"LolMatchV5MatchTeamObjective\", {\n \"first\": bool,\n \"kills\": int\n })\n LolMatchV5MatchInfoParticipantChallenges = TypedDict(\"LolMatchV5MatchInfoParticipantChallenges\", {\n \"12AssistStreakCount\": int,\n \"abilityUses\": int,\n \"acesBefore15Minutes\": int,\n \"alliedJungleMonsterKills\": int,\n \"baronTakedowns\": int,\n \"blastConeOppositeOpponentCount\": int,\n \"bountyGold\": int,\n \"buffsStolen\": int,\n \"completeSupportQuestInTime\": int,\n \"controlWardTimeCoverageInRiverOrEnemyHalf\": NotRequired[float],\n \"controlWardsPlaced\": int,\n \"damagePerMinute\": float,\n \"damageTakenOnTeamPercentage\": float,\n \"dancedWithRiftHerald\": int,\n \"deathsByEnemyChamps\": int,\n \"dodgeSkillShotsSmallWindow\": int,\n \"doubleAces\": int,\n \"dragonTakedowns\": int,\n \"earliestBaron\": float,\n \"earlyLaningPhaseGoldExpAdvantage\": int,\n \"effectiveHealAndShielding\": float,\n \"elderDragonKillsWithOpposingSoul\": int,\n \"elderDragonMultikills\": int,\n \"enemyChampionImmobilizations\": int,\n \"enemyJungleMonsterKills\": int,\n \"epicMonsterKillsNearEnemyJungler\": int,\n \"epicMonsterKillsWithin30SecondsOfSpawn\": int,\n \"epicMonsterSteals\": int,\n \"epicMonsterStolenWithoutSmite\": int,\n \"firstTurretKilled\": int,\n \"firstTurretKilledTime\": NotRequired[float],\n \"flawlessAces\": int,\n \"fullTeamTakedown\": int,\n \"gameLength\": float,\n \"getTakedownsInAllLanesEarlyJungleAsLaner\": NotRequired[int],\n \"goldPerMinute\": float,\n \"hadOpenNexus\": int,\n \"immobilizeAndKillWithAlly\": int,\n \"initialBuffCount\": int,\n \"initialCrabCount\": int,\n \"jungleCsBefore10Minutes\": float,\n \"junglerTakedownsNearDamagedEpicMonster\": int,\n \"kTurretsDestroyedBeforePlatesFall\": int,\n \"kda\": float,\n \"killAfterHiddenWithAlly\": int,\n \"killParticipation\": float,\n \"killedChampTookFullTeamDamageSurvived\": int,\n \"killingSprees\": int,\n \"killsNearEnemyTurret\": int,\n 
\"killsOnOtherLanesEarlyJungleAsLaner\": NotRequired[int],\n \"killsOnRecentlyHealedByAramPack\": int,\n \"killsUnderOwnTurret\": int,\n \"killsWithHelpFromEpicMonster\": int,\n \"knockEnemyIntoTeamAndKill\": int,\n \"landSkillShotsEarlyGame\": int,\n \"laneMinionsFirst10Minutes\": int,\n \"laningPhaseGoldExpAdvantage\": int,\n \"legendaryCount\": int,\n \"lostAnInhibitor\": int,\n \"maxCsAdvantageOnLaneOpponent\": float,\n \"maxKillDeficit\": int,\n \"maxLevelLeadLaneOpponent\": int,\n \"mejaisFullStackInTime\": int,\n \"moreEnemyJungleThanOpponent\": float,\n \"multiKillOneSpell\": int,\n \"multiTurretRiftHeraldCount\": int,\n \"multikills\": int,\n \"multikillsAfterAggressiveFlash\": int,\n \"mythicItemUsed\": NotRequired[int],\n \"outerTurretExecutesBefore10Minutes\": int,\n \"outnumberedKills\": int,\n \"outnumberedNexusKill\": int,\n \"perfectDragonSoulsTaken\": int,\n \"perfectGame\": int,\n \"pickKillWithAlly\": int,\n \"playedChampSelectPosition\": NotRequired[int],\n \"poroExplosions\": int,\n \"quickCleanse\": int,\n \"quickFirstTurret\": int,\n \"quickSoloKills\": int,\n \"riftHeraldTakedowns\": int,\n \"saveAllyFromDeath\": int,\n \"scuttleCrabKills\": int,\n \"shortestTimeToAceFromFirstTakedown\": NotRequired[float],\n \"skillshotsDodged\": int,\n \"skillshotsHit\": int,\n \"snowballsHit\": int,\n \"soloBaronKills\": int,\n \"soloKills\": int,\n \"stealthWardsPlaced\": int,\n \"survivedSingleDigitHpCount\": int,\n \"survivedThreeImmobilizesInFight\": int,\n \"takedownOnFirstTurret\": int,\n \"takedowns\": int,\n \"takedownsAfterGainingLevelAdvantage\": int,\n \"takedownsBeforeJungleMinionSpawn\": int,\n \"takedownsFirstXMinutes\": int,\n \"takedownsInAlcove\": int,\n \"takedownsInEnemyFountain\": int,\n \"teamBaronKills\": int,\n \"teamDamagePercentage\": float,\n \"teamElderDragonKills\": int,\n \"teamRiftHeraldKills\": int,\n \"tookLargeDamageSurvived\": int,\n \"turretPlatesTaken\": int,\n \"turretTakedowns\": int,\n \"turretsTakenWithRiftHerald\": int,\n \"twentyMinionsIn3SecondsCount\": int,\n \"twoWardsOneSweeperCount\": int,\n \"unseenRecalls\": int,\n \"visionScoreAdvantageLaneOpponent\": float,\n \"visionScorePerMinute\": float,\n \"wardTakedowns\": int,\n \"wardTakedownsBefore20M\": int,\n \"wardsGuarded\": int,\n \"earliestDragonTakedown\": NotRequired[float],\n \"baronBuffGoldAdvantageOverThreshold\": NotRequired[int],\n \"teleportTakedowns\": NotRequired[int],\n \"fastestLegendary\": NotRequired[float],\n \"highestChampionDamage\": NotRequired[int],\n \"highestCrowdControlScore\": NotRequired[int],\n \"junglerKillsEarlyJungle\": NotRequired[int],\n \"killsOnLanersEarlyJungleAsJungler\": NotRequired[int],\n \"fasterSupportQuestCompletion\": NotRequired[int],\n \"highestWardKills\": NotRequired[int],\n \"soloTurretsLategame\": NotRequired[int],\n \"thirdInhibitorDestroyedTime\": NotRequired[float],\n }, total=False) | dict[str, int | float]\n LolMatchV5MatchInfoParticipantPerksStatPerks = TypedDict(\"LolMatchV5MatchInfoParticipantPerksStatPerks\", {\n \"defense\": int,\n \"flex\": int,\n \"offense\": int\n })\n LolMatchV5MatchInfoParticipantPerksStyleSelection = TypedDict(\"LolMatchV5MatchInfoParticipantPerksStyleSelection\", {\n \"perk\": int,\n \"var1\": int,\n \"var2\": int,\n \"var3\": int}\n )\n LolMatchV5MatchInfoParticipantPerksStyle = TypedDict(\"LolMatchV5MatchInfoParticipantPerksStyle\", {\n \"description\": str,\n \"selections\": list[LolMatchV5MatchInfoParticipantPerksStyleSelection],\n \"style\": int\n })\n LolMatchV5MatchInfoParticipantPerks = 
TypedDict(\"LolMatchV5MatchInfoParticipantPerks\", {\n \"statPerks\": LolMatchV5MatchInfoParticipantPerksStatPerks,\n \"styles\": list[LolMatchV5MatchInfoParticipantPerksStyle]\n })\n LolMatchV5MatchInfoParticipantMissions = TypedDict(\"LolMatchV5MatchInfoParticipant\", {\n \"playerScore0\": float,\n \"playerScore1\": float,\n \"playerScore10\": float,\n \"playerScore11\": float,\n \"playerScore2\": float,\n \"playerScore3\": float,\n \"playerScore4\": float,\n \"playerScore5\": float,\n \"playerScore6\": float,\n \"playerScore7\": float,\n \"playerScore8\": float,\n \"playerScore9\": float,\n })\n LolMatchV5MatchInfoParticipant = TypedDict(\"LolMatchV5MatchInfoParticipant\", {\n \"allInPings\": int,\n \"assistMePings\": int,\n \"assists\": int,\n \"baitPings\": int,\n \"baronKills\": int,\n \"basicPings\": int,\n \"bountyLevel\": int,\n \"challenges\": NotRequired[LolMatchV5MatchInfoParticipantChallenges],\n \"champExperience\": int,\n \"champLevel\": int,\n \"championId\": int,\n \"championName\": str,\n \"championTransform\": int,\n \"commandPings\": int,\n \"consumablesPurchased\": int,\n \"damageDealtToBuildings\": int,\n \"damageDealtToObjectives\": int,\n \"damageDealtToTurrets\": int,\n \"damageSelfMitigated\": int,\n \"dangerPings\": int,\n \"deaths\": int,\n \"detectorWardsPlaced\": int,\n \"doubleKills\": int,\n \"dragonKills\": int,\n \"eligibleForProgression\": bool,\n \"enemyMissingPings\": int,\n \"enemyVisionPings\": int,\n \"firstBloodAssist\": bool,\n \"firstBloodKill\": bool,\n \"firstTowerAssist\": bool,\n \"firstTowerKill\": bool,\n \"gameEndedInEarlySurrender\": bool,\n \"gameEndedInSurrender\": bool,\n \"getBackPings\": int,\n \"goldEarned\": int,\n \"goldSpent\": int,\n \"holdPings\": int,\n \"individualPosition\": str,\n \"inhibitorKills\": int,\n \"inhibitorTakedowns\": int,\n \"inhibitorsLost\": int,\n \"item0\": int,\n \"item1\": int,\n \"item2\": int,\n \"item3\": int,\n \"item4\": int,\n \"item5\": int,\n \"item6\": int,\n \"itemsPurchased\": int,\n \"killingSprees\": int,\n \"kills\": int,\n \"lane\": str,\n \"largestCriticalStrike\": int,\n \"largestKillingSpree\": int,\n \"largestMultiKill\": int,\n \"longestTimeSpentLiving\": int,\n \"magicDamageDealt\": int,\n \"magicDamageDealtToChampions\": int,\n \"magicDamageTaken\": int,\n \"missions\": NotRequired[LolMatchV5MatchInfoParticipantMissions],\n \"needVisionPings\": int,\n \"neutralMinionsKilled\": int,\n \"nexusKills\": int,\n \"nexusLost\": int,\n \"nexusTakedowns\": int,\n \"objectivesStolen\": int,\n \"objectivesStolenAssists\": int,\n \"onMyWayPings\": int,\n \"participantId\": int,\n \"pentaKills\": int,\n \"perks\": LolMatchV5MatchInfoParticipantPerks,\n \"physicalDamageDealt\": int,\n \"physicalDamageDealtToChampions\": int,\n \"physicalDamageTaken\": int,\n \"placement\": int,\n \"playerAugment1\": int,\n \"playerAugment2\": int,\n \"playerAugment3\": int,\n \"playerAugment4\": int,\n \"playerSubteamId\": int,\n \"playerScore0\": NotRequired[float],\n \"playerScore1\": NotRequired[float],\n \"playerScore10\": NotRequired[float],\n \"playerScore11\": NotRequired[float],\n \"playerScore2\": NotRequired[float],\n \"playerScore3\": NotRequired[float],\n \"playerScore4\": NotRequired[float],\n \"playerScore5\": NotRequired[float],\n \"playerScore6\": NotRequired[float],\n \"playerScore7\": NotRequired[float],\n \"playerScore8\": NotRequired[float],\n \"playerScore9\": NotRequired[float],\n \"profileIcon\": int,\n \"pushPings\": int,\n \"puuid\": str,\n \"quadraKills\": int,\n \"riotIdName\": 
NotRequired[str],\n \"riotIdTagline\": str,\n \"riotIdGameName\": NotRequired[str],\n \"role\": str,\n \"sightWardsBoughtInGame\": int,\n \"spell1Casts\": int,\n \"spell2Casts\": int,\n \"spell3Casts\": int,\n \"spell4Casts\": int,\n \"subteamPlacement\": int,\n \"summoner1Casts\": int,\n \"summoner1Id\": int,\n \"summoner2Casts\": int,\n \"summoner2Id\": int,\n \"summonerId\": str,\n \"summonerLevel\": int,\n \"summonerName\": str,\n \"teamEarlySurrendered\": bool,\n \"teamId\": int,\n \"teamPosition\": str,\n \"timeCCingOthers\": int,\n \"timePlayed\": int,\n \"totalAllyJungleMinionsKilled\": int,\n \"totalDamageDealt\": int,\n \"totalDamageDealtToChampions\": int,\n \"totalDamageShieldedOnTeammates\": int,\n \"totalDamageTaken\": int,\n \"totalEnemyJungleMinionsKilled\": int,\n \"totalHeal\": int,\n \"totalHealsOnTeammates\": int,\n \"totalMinionsKilled\": int,\n \"totalTimeCCDealt\": int,\n \"totalTimeSpentDead\": int,\n \"totalUnitsHealed\": int,\n \"tripleKills\": int,\n \"trueDamageDealt\": int,\n \"trueDamageDealtToChampions\": int,\n \"trueDamageTaken\": int,\n \"turretKills\": int,\n \"turretTakedowns\": int,\n \"turretsLost\": int,\n \"unrealKills\": int,\n \"visionClearedPings\": int,\n \"visionScore\": int,\n \"visionWardsBoughtInGame\": int,\n \"wardsKilled\": int,\n \"wardsPlaced\": int,\n \"win\": bool\n })\n LolMatchV5MatchInfoTeamBan = TypedDict(\"LolMatchV5MatchInfoTeamBan\", {\n \"championId\": int,\n \"pickTurn\": int\n })\n LolMatchV5MatchInfoTeamObjectives = TypedDict(\"LolMatchV5MatchInfoTeamObjectives\", {\n \"baron\": LolMatchV5MatchTeamObjective,\n \"champion\": LolMatchV5MatchTeamObjective,\n \"dragon\": LolMatchV5MatchTeamObjective,\n \"horde\": NotRequired[LolMatchV5MatchTeamObjective],\n \"inhibitor\": LolMatchV5MatchTeamObjective,\n \"riftHerald\": LolMatchV5MatchTeamObjective,\n \"tower\": LolMatchV5MatchTeamObjective\n })\n LolMatchV5MatchInfoTeam = TypedDict(\"LolMatchV5MatchInfoTeam\", {\n \"bans\": list[LolMatchV5MatchInfoTeamBan],\n \"objectives\": LolMatchV5MatchInfoTeamObjectives,\n \"teamId\": int,\n \"win\": bool\n })\n LolMatchV5MatchInfo = TypedDict(\"LolMatchV5MatchInfo\", {\n \"gameCreation\": int,\n \"gameDuration\": int,\n \"gameEndTimestamp\": int,\n \"gameId\": int,\n \"gameMode\": str,\n \"gameName\": str,\n \"gameStartTimestamp\": int,\n \"gameType\": str,\n \"gameVersion\": str,\n \"mapId\": int,\n \"participants\": list[LolMatchV5MatchInfoParticipant],\n \"platformId\": str,\n \"queueId\": int,\n \"teams\": list[LolMatchV5MatchInfoTeam],\n \"tournamentCode\": str\n })\n LolMatchV5Match = TypedDict(\"LolMatchV5Match\", {\n \"metadata\": LolMatchV5MatchMetadata,\n \"info\": LolMatchV5MatchInfo\n })\n LolMatchV5MatchTimelineParticipantFrameChampionStats = TypedDict(\"LolMatchV5MatchTimelineParticipantFrameChampionStats\", {\n \"abilityHaste\": int,\n \"abilityPower\": int,\n \"armor\": int,\n \"armorPen\": int,\n \"armorPenPercent\": int,\n \"attackDamage\": int,\n \"attackSpeed\": int,\n \"bonusArmorPenPercent\": int,\n \"bonusMagicPenPercent\": int,\n \"ccReduction\": int,\n \"cooldownReduction\": int,\n \"health\": int,\n \"healthMax\": int,\n \"healthRegen\": int,\n \"lifesteal\": int,\n \"magicPen\": int,\n \"magicPenPercent\": int,\n \"magicResist\": int,\n \"movementSpeed\": int,\n \"omnivamp\": int,\n \"physicalVamp\": int,\n \"power\": int,\n \"powerMax\": int,\n \"powerRegen\": int,\n \"spellVamp\": int\n })\n LolMatchV5MatchTimelineParticipantFrameDamageStats = TypedDict(\"LolMatchV5MatchTimelineParticipantFrameDamageStats\", 
{\n \"magicDamageDone\": int,\n \"magicDamageDoneToChampions\": int,\n \"magicDamageTaken\": int,\n \"physicalDamageDone\": int,\n \"physicalDamageDoneToChampions\": int,\n \"physicalDamageTaken\": int,\n \"totalDamageDone\": int,\n \"totalDamageDoneToChampions\": int,\n \"totalDamageTaken\": int,\n \"trueDamageDone\": int,\n \"trueDamageDoneToChampions\": int,\n \"trueDamageTaken\": int\n })\n LolMatchV5MatchTimelinePosition = TypedDict(\"LolMatchV5MatchTimelinePosition\", {\n \"x\": int,\n \"y\": int\n })\n LolMatchV5MatchTimelineParticipantFrame = TypedDict(\"LolMatchV5MatchTimelineParticipantFrame\", {\n \"championStats\": LolMatchV5MatchTimelineParticipantFrameChampionStats,\n \"currentGold\": int,\n \"damageStats\": LolMatchV5MatchTimelineParticipantFrameDamageStats,\n \"goldPerSecond\": int,\n \"jungleMinionsKilled\": int,\n \"level\": int,\n \"minionsKilled\": int,\n \"participantId\": int,\n \"position\": LolMatchV5MatchTimelinePosition,\n \"timeEnemySpentControlled\": int,\n \"totalGold\": int,\n \"xp\": int\n })\n LolMatchV5MatchTimelineEventDamage = TypedDict(\"LolMatchV5MatchTimelineEventDamage\", {\n \"basic\": bool,\n \"magicDamage\": int,\n \"name\": str,\n \"participantId\": int,\n \"physicalDamage\": int,\n \"spellName\": str,\n \"spellSlot\": int,\n \"trueDamage\": int,\n \"type\": str\n })\n LolMatchV5MatchTimelineMetadata = TypedDict(\"LolMatchV5MatchTimelineMetadata\", {\n \"dataVersion\": str,\n \"matchId\": str,\n \"participants\": list[str]\n })\n LolMatchV5MatchTimelineInfoFrameEvent = TypedDict(\"LolMatchV5MatchTimelineInfoFrameEvent\", {\n \"afterId\": NotRequired[int],\n \"beforeId\": NotRequired[int],\n \"goldGain\": NotRequired[int],\n \"participantId\": NotRequired[int],\n \"timestamp\": int,\n \"type\": str,\n \"creatorId\": NotRequired[int],\n \"wardType\": NotRequired[str],\n \"level\": NotRequired[int],\n \"itemId\": NotRequired[int],\n \"assistingParticipantIds\": NotRequired[list[int]],\n \"bounty\": NotRequired[int],\n \"killStreakLength\": NotRequired[int],\n \"killerId\": NotRequired[int],\n \"position\": NotRequired[LolMatchV5MatchTimelinePosition],\n \"shutdownBounty\": NotRequired[int],\n \"victimDamageDealt\": NotRequired[list[LolMatchV5MatchTimelineEventDamage]],\n \"victimDamageReceived\": NotRequired[list[LolMatchV5MatchTimelineEventDamage]],\n \"victimId\": NotRequired[int],\n \"levelUpType\": NotRequired[str],\n \"skillSlot\": NotRequired[int],\n \"realTimestamp\": NotRequired[int],\n })\n LolMatchV5MatchTimelineInfoFrame = TypedDict(\"LolMatchV5MatchTimelineInfoFrame\", {\n \"events\": list[LolMatchV5MatchTimelineInfoFrameEvent],\n \"participantFrames\": dict[str, LolMatchV5MatchTimelineParticipantFrame],\n \"timestamp\": int\n })\n LolMatchV5MatchTimelineInfoParticipants = TypedDict(\"LolMatchV5MatchTimelineInfoParticipants\", {\n \"participantId\": int,\n \"puuid\": str,\n })\n LolMatchV5MatchTimelineInfo = TypedDict(\"LolMatchV5MatchTimelineInfo\", {\n \"frameInterval\": int,\n \"frames\": list[LolMatchV5MatchTimelineInfoFrame],\n \"gameId\": int,\n \"participants\": list[LolMatchV5MatchTimelineInfoParticipants]\n })\n LolMatchV5MatchTimeline = TypedDict(\"LolMatchV5MatchTimeline\", {\n \"metadata\": LolMatchV5MatchTimelineMetadata,\n \"info\": LolMatchV5MatchTimelineInfo\n })\n LolSpectatorV4GameParticipantPerks = TypedDict(\"LolSpectatorV4GameParticipantPerks\", {\n \"perkIds\": list[int],\n \"perkStyle\": int,\n \"perkSubStyle\": int\n })\n LolSpectatorV4GameParticipant = TypedDict(\"LolSpectatorV4GameParticipant\", {\n 
\"gameCustomizationObjects\": NotRequired[list[str]],\n \"perks\": NotRequired[LolSpectatorV4GameParticipantPerks],\n \"puuid\": str,\n \"summonerId\": str,\n \"teamId\": int,\n \"spell1Id\": int,\n \"spell2Id\": int,\n \"championId\": int,\n \"profileIconId\": int,\n \"summonerName\": str,\n \"bot\": bool\n })\n LolSpectatorV4GameObservers = TypedDict(\"LolSpectatorV4GameObservers\", {\n \"encryptionKey\": str\n })\n LolSpectatorV4Game = TypedDict(\"LolSpectatorV4Game\", {\n \"gameId\": int,\n \"mapId\": int,\n \"gameMode\": str,\n \"gameType\": str,\n \"gameQueueConfigId\": int,\n \"participants\": list[LolSpectatorV4GameParticipant],\n \"observers\": LolSpectatorV4GameObservers,\n \"platformId\": str,\n \"bannedChampions\": list[int],\n \"gameStartTime\": int,\n \"gameLength\": int\n })\n LolSpectatorV4GameList = TypedDict(\"LolSpectatorV4GameList\", {\n \"gameList\": list[LolSpectatorV4Game],\n \"clientRefreshInterval\": int\n })\n LolSummonerV4Summoner = TypedDict(\"SummonerV4Summoner\", {\n \"id\": str,\n \"accountId\": str,\n \"puuid\": str,\n \"name\": str,\n \"profileIconId\": int,\n \"revisionDate\": int,\n \"summonerLevel\": int\n })\n\n # Teamfight Tactics Types\n\n TftLeagueV1LeagueEntry = TypedDict(\"TftLeagueV1LeagueEntry\", {\n \"summonerId\": str,\n \"summonerName\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n })\n TftLeagueV1LeagueFullEntry = TypedDict(\"TftLeagueV1LeagueFullEntry\", {\n \"leagueId\": str,\n \"puuid\": str,\n \"summonerId\": str,\n \"summonerName\": str,\n \"queueType\": str,\n \"tier\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n \"miniSeries\": NotRequired[LolLeagueV4LeagueEntryMiniSeries],\n })\n TftLeagueV1League = TypedDict(\"TftLeagueV1League\", {\n \"tier\": str,\n \"leagueId\": NotRequired[str],\n \"queue\": NotRequired[str],\n \"name\": NotRequired[str],\n \"entries\": list[TftLeagueV1LeagueEntry]\n })\n TftMatchV1MatchMetadata = TypedDict(\"TftMatchV1MatchMetadata\", {\n \"data_version\": str,\n \"match_id\": str,\n \"participants\": list[str]\n })\n TftMatchV1MatchInfoParticipantCompanion = TypedDict(\"TftMatchV1MatchInfoParticipantCompanion\", {\n \"content_ID\": str,\n \"item_ID\": int,\n \"skin_ID\": int,\n \"species\": str\n })\n TftMatchV1MatchInfoParticipantTrait = TypedDict(\"TftMatchV1MatchInfoParticipantTrait\", {\n \"name\": str,\n \"num_units\": int,\n \"style\": int,\n \"tier_current\": int,\n \"tier_total\": int\n })\n TftMatchV1MatchInfoParticipantUnit = TypedDict(\"TftMatchV1MatchInfoParticipantUnit\", {\n \"character_id\": str,\n \"itemNames\": list[str],\n \"name\": str,\n \"rarity\": int,\n \"tier\": int\n })\n TftMatchV1MatchInfoParticipant = TypedDict(\"TftMatchV1MatchInfoParticipant\", {\n \"augments\": list[str],\n \"companion\": TftMatchV1MatchInfoParticipantCompanion,\n \"gold_left\": int,\n \"last_round\": int,\n \"level\": int,\n \"placement\": int,\n \"players_eliminated\": int,\n \"puuid\": str,\n \"time_eliminated\": float,\n \"total_damage_to_players\": int,\n \"traits\": list[TftMatchV1MatchInfoParticipantTrait],\n \"units\": list[TftMatchV1MatchInfoParticipantUnit]\n })\n TftMatchV1MatchInfo = TypedDict(\"TftMatchV1MatchInfo\", {\n \"game_datetime\": int,\n \"game_length\": float,\n \"game_version\": str,\n \"participants\": 
list[TftMatchV1MatchInfoParticipant],\n \"queue_id\": int,\n \"tft_game_type\": str,\n \"tft_set_core_name\": str,\n \"tft_set_number\": int\n })\n TftMatchV1Match = TypedDict(\"TftMatchV1Match\", {\n \"metadata\": TftMatchV1MatchMetadata,\n \"info\": TftMatchV1MatchInfo\n })\n TftSummonerV1Summoner = LolSummonerV4Summoner\n\n # Legends of Runeterra Types\n\n LorRankedV1LeaderboardPlayer = TypedDict(\"LorRankedV1LeaderboardPlayer\", {\n \"name\": str,\n \"rank\": int,\n \"lp\": float\n })\n LorRankedV1Leaderboard = TypedDict(\"LorRankedV1Leaderboard\", {\n \"players\": list[LorRankedV1LeaderboardPlayer]\n })\n LorMatchV1MatchMetadata = TypedDict(\"LorMatchV1MatchMetadata\", {\n \"data_version\": str,\n \"match_id\": str,\n \"participants\": list[str]\n })\n LorMatchV1MatchInfoPlayer = TypedDict(\"LorMatchV1MatchInfoPlayer\", {\n \"puuid\": str,\n \"deck_id\": str,\n \"deck_code\": str,\n \"factions\": list[str],\n \"game_outcome\": str,\n \"order_of_play\": int\n })\n LorMatchV1MatchInfo = TypedDict(\"LorMatchV1MatchInfo\", {\n \"game_mode\": str,\n \"game_type\": str,\n \"game_start_time_utc\": str,\n \"game_version\": str,\n \"players\": list[LorMatchV1MatchInfoPlayer],\n \"total_turn_count\": int\n })\n LorMatchV1Match = TypedDict(\"LorMatchV1Match\", {\n \"metadata\": LorMatchV1MatchMetadata,\n \"info\": LorMatchV1MatchInfo\n })\n\n # Valorant Types\n\n ValContentV1ContentsAssetLocalizedNames = TypedDict(\"ValContentV1ContentsAssetLocalizedNames\", {\n \"ar-AE\": str,\n \"de-DE\": str,\n \"en-US\": str,\n \"es-ES\": str,\n \"es-MX\": str,\n \"fr-FR\": str,\n \"id-ID\": str,\n \"it-IT\": str,\n \"ja-JP\": str,\n \"ko-KR\": str,\n \"pl-PL\": str,\n \"pt-BR\": str,\n \"ru-RU\": str,\n \"th-TH\": str,\n \"tr-TR\": str,\n \"vi-VN\": str,\n \"zh-CN\": str,\n \"zh-TW\": str,\n })\n ValContentV1ContentsAsset = TypedDict(\"ValContentV1ContentsAsset\", {\n \"name\": str,\n \"id\": str,\n \"localizedNames\": NotRequired[ValContentV1ContentsAssetLocalizedNames],\n \"assetName\": str,\n \"assetPath\": NotRequired[str]\n })\n ValContentV1ContentsAct = TypedDict(\"ValContentV1ContentsAct\", {\n \"id\": str,\n \"localizedNames\": NotRequired[ValContentV1ContentsAssetLocalizedNames],\n \"parentId\": str,\n \"type\": str,\n \"name\": str,\n \"isActive\": bool\n })\n ValContentV1Contents = TypedDict(\"ValContentV1Contents\", {\n \"version\": str,\n \"characters\": list[ValContentV1ContentsAsset],\n \"maps\": list[ValContentV1ContentsAsset],\n \"chromas\": list[ValContentV1ContentsAsset],\n \"skins\": list[ValContentV1ContentsAsset],\n \"skinLevels\": list[ValContentV1ContentsAsset],\n \"equips\": list[ValContentV1ContentsAsset],\n \"gameModes\": list[ValContentV1ContentsAsset],\n \"totems\": list[ValContentV1ContentsAsset],\n \"sprays\": list[ValContentV1ContentsAsset],\n \"sprayLevels\": list[ValContentV1ContentsAsset],\n \"charms\": list[ValContentV1ContentsAsset],\n \"charmLevels\": list[ValContentV1ContentsAsset],\n \"playerCards\": list[ValContentV1ContentsAsset],\n \"playerTitles\": list[ValContentV1ContentsAsset],\n \"acts\": list[ValContentV1ContentsAct],\n \"ceremonies\": list[ValContentV1ContentsAsset]\n })\n \n ValRankedV1LeaderboardTierDetail = TypedDict(\"ValRankedV1LeaderboardTierDetail\", {\n \"rankedRatingThreshold\": int,\n \"startingPage\": int,\n \"startingIndex\": int\n })\n ValRankedV1LeaderboardPlayer = TypedDict(\"ValRankedV1LeaderboardPlayer\", {\n \"puuid\": str,\n \"gameName\": str,\n \"tagLine\": str,\n \"leaderboardRank\": int,\n \"rankedRating\": int,\n \"numberOfWins\": 
int,\n \"competitiveTier\": int\n })\n ValRankedV1LeaderboardTierDetails = TypedDict(\"ValRankedV1LeaderboardTierDetails\", {\n \"24\": ValRankedV1LeaderboardTierDetail,\n \"25\": ValRankedV1LeaderboardTierDetail,\n \"26\": ValRankedV1LeaderboardTierDetail,\n \"27\": ValRankedV1LeaderboardTierDetail\n })\n ValRankedV1Leaderboard = TypedDict(\"ValRankedV1Leaderboard\", {\n \"actId\": str,\n \"players\": list[ValRankedV1LeaderboardPlayer],\n \"totalPlayers\": int,\n \"immortalStartingPage\": int,\n \"immortalStartingIndex\": int,\n \"topTierRRThreshold\": int,\n \"tierDetails\": ValRankedV1LeaderboardTierDetails,\n \"startIndex\": int,\n \"query\": str,\n \"shard\": str\n })\n ValMatchV1MatchLocation = TypedDict(\"ValMatchV1MatchLocation\", {\n \"x\": float,\n \"y\": float\n })\n ValMatchV1MatchPlayerLocation = TypedDict(\"ValMatchV1MatchPlayerLocation\", {\n \"puuid\": str,\n \"viewRadians\": float,\n \"location\": ValMatchV1MatchLocation\n })\n ValMatchV1MatchInfo = TypedDict(\"ValMatchV1MatchInfo\", {\n \"matchId\": str,\n \"mapId\": str,\n \"gameVersion\": str,\n \"gameLengthMillis\": int,\n \"region\": str,\n \"gameStartMillis\": int,\n \"provisioningFlowId\": str,\n \"isCompleted\": bool,\n \"customGameName\": str,\n \"queueId\": str,\n \"gameMode\": str,\n \"isRanked\": bool,\n \"premierMatchInfo\": dict,\n \"seasonId\": str\n })\n ValMatchV1MatchPlayerStatsAbilityCasts = TypedDict(\"ValMatchV1MatchPlayerStatsAbilityCasts\", {\n \"grenadeCasts\": int,\n \"ability1Casts\": int,\n \"ability2Casts\": int,\n \"ultimateCasts\": int\n })\n ValMatchV1MatchPlayerStats = TypedDict(\"ValMatchV1MatchPlayerStats\", {\n \"score\": int,\n \"roundsPlayed\": int,\n \"kills\": int,\n \"deaths\": int,\n \"assists\": int,\n \"playtimeMillis\": int,\n \"abilityCasts\": ValMatchV1MatchPlayerStatsAbilityCasts | None\n })\n ValMatchV1MatchPlayer = TypedDict(\"ValMatchV1MatchPlayer\", {\n \"puuid\": str,\n \"gameName\": str,\n \"tagLine\": str,\n \"teamId\": str,\n \"partyId\": str,\n \"characterId\": str,\n \"stats\": ValMatchV1MatchPlayerStats,\n \"competitiveTier\": int,\n \"isObserver\": bool,\n \"playerCard\": str,\n \"playerTitle\": str,\n \"accountLevel\": int\n })\n ValMatchV1MatchTeam = TypedDict(\"ValMatchV1MatchTeam\", {\n \"teamId\": str,\n \"won\": bool,\n \"roundsPlayed\": int,\n \"roundsWon\": int,\n \"numPoints\": int\n })\n ValMatchV1MatchRoundResultPlayerStatKill = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatKill\", {\n \"timeSinceGameStartMillis\": int,\n \"timeSinceRoundStartMillis\": int,\n \"killer\": str,\n \"victim\": str,\n \"victimLocation\": ValMatchV1MatchLocation,\n \"assistants\": list[str],\n \"playerLocations\": list[ValMatchV1MatchPlayerLocation],\n \"finishingDamage\": TypedDict(\"FinishingDamage\", {\n \"damageType\": str,\n \"damageItem\": str,\n \"isSecondaryFireMode\": bool\n })\n })\n ValMatchV1MatchRoundResultPlayerStatDamage = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatDamage\", {\n \"receiver\": str,\n \"damage\": int,\n \"legshots\": int,\n \"bodyshots\": int,\n \"headshots\": int\n })\n ValMatchV1MatchRoundResultPlayerStatEconomy = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatEconomy\", {\n \"loadoutValue\": int,\n \"weapon\": str,\n \"armor\": str,\n \"remaining\": int,\n \"spent\": int\n })\n ValMatchV1MatchRoundResultPlayerStatAbility = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatAbility\", {\n \"grenadeEffects\": str | None,\n \"ability1Effects\": str | None,\n \"ability2Effects\": str | None,\n \"ultimateEffects\": str | None\n })\n 
ValMatchV1MatchRoundResultPlayerStat = TypedDict(\"ValMatchV1MatchRoundResultPlayerStat\", {\n \"puuid\": str,\n \"kills\": list[ValMatchV1MatchRoundResultPlayerStatKill],\n \"damage\": list[ValMatchV1MatchRoundResultPlayerStatDamage],\n \"score\": int,\n \"economy\": ValMatchV1MatchRoundResultPlayerStatEconomy,\n \"ability\": ValMatchV1MatchRoundResultPlayerStatAbility\n })\n ValMatchV1MatchRoundResult = TypedDict(\"ValMatchV1MatchRoundResult\", {\n \"roundNum\": int,\n \"roundResult\": str,\n \"roundCeremony\": str,\n \"winningTeam\": str,\n \"bombPlanter\": str | None,\n \"bombDefuser\": str | None,\n \"plantRoundTime\": int,\n \"plantPlayerLocations\": list[ValMatchV1MatchPlayerLocation] | None,\n \"plantLocation\": ValMatchV1MatchLocation,\n \"plantSite\": str,\n \"defuseRoundTime\": int,\n \"defusePlayerLocations\": list[ValMatchV1MatchPlayerLocation] | None,\n \"defuseLocation\": ValMatchV1MatchLocation,\n \"playerStats\": list[ValMatchV1MatchRoundResultPlayerStat],\n \"roundResultCode\": str\n })\n ValMatchV1Match = TypedDict(\"ValMatchV1Match\", {\n \"matchInfo\": ValMatchV1MatchInfo,\n \"players\": list[ValMatchV1MatchPlayer],\n \"coaches\": list[str],\n \"teams\": list[ValMatchV1MatchTeam],\n \"roundResults\": list[ValMatchV1MatchRoundResult]\n })\n ValMatchV1MatchlistHistory = TypedDict(\"ValMatchV1MatchlistHistory\", {\n \"matchId\": str,\n \"gameStartTimeMillis\": int,\n \"queueId\": str\n })\n ValMatchV1Matchlist = TypedDict(\"ValMatchV1Matchlist\", {\n \"puuid\": str,\n \"history\": list[ValMatchV1MatchlistHistory]\n })\n ValMatchV1RecentMatches = TypedDict(\"ValMatchV1RecentMatches\", {\n \"currentTime\": int,\n \"matchIds\": list[str]\n })\n\n # Status Types\n\n StatusV1PlatformDataLocaleContent = TypedDict(\"StatusV1PlatformDataLocaleContent\", {\n \"locale\": str,\n \"content\": str\n })\n StatusV1PlatformDataEntryUpdate = TypedDict(\"StatusV1PlatformDataEntryUpdate\", {\n \"id\": int,\n \"created_at\": str,\n \"updated_at\": str,\n \"publish\": bool,\n \"author\": str,\n \"translations\": list[StatusV1PlatformDataLocaleContent],\n \"publish_locations\": list[str]\n })\n StatusV1PlatformDataEntry = TypedDict(\"StatusV1PlatformDataEntry\", {\n \"id\": int,\n \"created_at\": str,\n \"updated_at\": str | None,\n \"archive_at\": str | None,\n \"titles\": list[StatusV1PlatformDataLocaleContent],\n \"updates\": list[StatusV1PlatformDataEntryUpdate],\n \"platforms\": list[str],\n \"maintenance_status\": str | None,\n \"incident_severity\": str | None\n })\n StatusV1PlatformData = TypedDict(\"StatusV4PlatformData\", {\n \"id\": str,\n \"name\": str,\n \"locales\": list[str],\n \"maintenances\": list[StatusV1PlatformDataEntry],\n \"incidents\": list[StatusV1PlatformDataEntry]\n })" }, { "identifier": "TaskGroup", "path": "pulsefire/taskgroups.py", "snippet": "class TaskGroup(asyncio.TaskGroup):\n \"\"\"Asynchronous context manager for managing groups of tasks.\n See [python asyncio task groups documentation](https://docs.python.org/3/library/asyncio-task.html#task-groups).\n\n Adapted for pulsefire, key differences from `asyncio.TaskGroup`:\n\n - Accepts a semaphore to restrict the amount of concurrent running coroutines.\n - Due to semaphore support, the `create_task` method is now async.\n - Allows internal collection of results and exceptions, similar to `asyncio.Task`.\n - If exception collection is on (default), the task group will not abort on task exceptions.\n\n Example:\n ```python\n async with TaskGroup(asyncio.Semaphore(100)) as tg:\n await 
tg.create_task(coro_func(...))\n results = tg.results()\n ```\n \"\"\"\n\n semaphore: asyncio.Semaphore | None = None\n \"\"\"Semaphore for restricting concurrent running coroutines.\"\"\"\n collect_results: bool = True\n \"\"\"Flag for collecting task results.\"\"\"\n collect_exceptions: bool = True\n \"\"\"Flag for collecting task exceptions, disables abort.\"\"\"\n\n def __init__(\n self,\n semaphore: asyncio.Semaphore | None = None,\n *,\n collect_results: bool = True,\n collect_exceptions: bool = True,\n ) -> None:\n super().__init__()\n self.semaphore = semaphore\n self.collect_results = collect_results\n self.collect_exceptions = collect_exceptions\n self._exceptions: list[BaseException] = []\n self._results = []\n\n async def __aenter__(self):\n self._exceptions = []\n self._results = []\n return await super().__aenter__()\n\n def results[T](self) -> list[T]:\n \"\"\"Return the collected results returned from created tasks.\"\"\"\n if not self.collect_results:\n raise RuntimeError(f\"TaskGroup {self!r} has `collect_results` off\")\n return self._results\n\n def exceptions(self) -> list[BaseException]:\n \"\"\"Return the collected exceptions raised from created tasks.\"\"\"\n if not self.collect_exceptions:\n raise RuntimeError(f\"TaskGroup {self!r} has `collect_exceptions` off\")\n return self._exceptions\n\n @override\n async def create_task[T](self, coro: Awaitable[T], *, name: str | None = None, context: Context | None = None) -> asyncio.Task[T]:\n \"\"\"Create a new task in this group and return it.\n\n If this group has a semaphore, wrap this semaphore on the coroutine.\n \"\"\"\n _coro = coro\n if self.semaphore:\n await self.semaphore.acquire()\n async def semaphored():\n try:\n return await _coro\n finally:\n self.semaphore.release()\n coro = semaphored()\n return super().create_task(coro, name=name, context=context)\n\n def _on_task_done(self, task) -> None:\n if exc := task.exception():\n if self.collect_exceptions:\n LOGGER.warning(\n \"TaskGroup: unhandled exception\\n\" +\n \"\".join(traceback.format_exception(type(exc), exc, exc.__traceback__))\n )\n self._exceptions.append(exc)\n self._tasks.discard(task)\n if self._on_completed_fut is not None and not self._tasks:\n if not self._on_completed_fut.done():\n self._on_completed_fut.set_result(True)\n return\n elif self.collect_results and not task.cancelled():\n self._results.append(task.result())\n return super()._on_task_done(task)" } ]
import asyncio
import os
from pulsefire.clients import RiotAPIClient
from pulsefire.functools import async_to_sync
from pulsefire.schemas import RiotAPISchema
from pulsefire.taskgroups import TaskGroup
16,605
@async_to_sync()
async def test_taskgroup():
    async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client:
        plat_league = await client.get_lol_league_v4_entries_by_division(region="na1", queue="RANKED_SOLO_5x5", tier="PLATINUM", division="IV")
        summoner = await client.get_lol_summoner_v4_by_id(region="na1", id=plat_league[0]["summonerId"])
        match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"])
@async_to_sync()
async def test_taskgroup():
    async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client:
        plat_league = await client.get_lol_league_v4_entries_by_division(region="na1", queue="RANKED_SOLO_5x5", tier="PLATINUM", division="IV")
        summoner = await client.get_lol_summoner_v4_by_id(region="na1", id=plat_league[0]["summonerId"])
        match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"])
async with TaskGroup() as tg:
3
2023-11-27 13:37:24+00:00
24k
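For orientation, the record above completes a pulsefire test at the line "async with TaskGroup() as tg:". Below is a minimal, illustrative sketch of how that TaskGroup and RiotAPIClient are typically combined after that point; it is not the repository's exact continuation, and the get_lol_match_v5_match call and the [:20] slice are assumptions drawn from pulsefire's documented usage rather than from this record.

import asyncio

from pulsefire.taskgroups import TaskGroup

# Illustrative sketch only (not the record's ground truth): fetch the listed
# matches concurrently, capping in-flight requests with a semaphore as the
# TaskGroup docstring quoted in this record demonstrates.
async def fetch_first_matches(client, match_ids):
    async with TaskGroup(asyncio.Semaphore(100)) as tg:
        for match_id in match_ids[:20]:
            # get_lol_match_v5_match(region=..., id=...) is assumed from pulsefire's client API
            await tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id))
    return tg.results()  # return values collected by the TaskGroup after all tasks finish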
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
TestCaseDistributionSystems/uc_mmgs_tess_stochastic.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Optimal Load Flow with Steady State Security\"},\n IEEE Transactions on Power Apparatus and Systems, Vol. PAS 93, No. 3,\n 1974, pp. 745-751.\n\n ... with branch parameters rounded to nearest 0.01, shunt values divided\n by 100 and shunt on bus 10 moved to bus 5, load at bus 5 zeroed out.\n Generator locations, costs and limits and bus areas were taken from ...\n\n Ferrero, R.W., Shahidehpour, S.M., Ramesh, V.C., I{\"Transaction analysis\n in deregulated power systems using game theory\"}, IEEE Transactions on\n Power Systems, Vol. 12, No. 3, Aug 1997, pp. 1340-1347.\n\n Generator Q limits were derived from Alsac & Stott, using their Pmax\n capacities. V limits and line |S| limits taken from Alsac & Stott.\n\n @return: Power flow data for 30 bus, 6 generator case.\n @see: U{http://www.pserc.cornell.edu/matpower/}\n \"\"\"\n ppc = {\"version\": '2'}\n\n ##----- Power Flow Data -----##\n ## system MVA base\n ppc[\"baseMVA\"] = 100.0\n\n ## bus data\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\n ppc[\"bus\"] = array([\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [2, 1, 0.1, 0.06, 0, 0, 1, 1, 0, 12.66, 1, 1.1, 0.95],\n [3, 1, 0.09, 0.04, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [4, 1, 0.12, 0.08, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [5, 1, 0.06, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [6, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [7, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [8, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [9, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [10, 1, 0.06, 0.02, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [11, 1, 0.045, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [12, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [13, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [14, 1, 0.12, 0.08, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [15, 1, 0.06, 0.01, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [16, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [17, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [18, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [19, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [20, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [21, 1, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [22, 2, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [23, 2, 0.09, 0.05, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [24, 1, 0.42, 0.20, 0, 0.04, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [25, 1, 0.42, 0.2, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [26, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [27, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [28, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [29, 1, 0.12, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [30, 1, 0.2, 0.6, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [31, 1, 0.15, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [32, 1, 0.21, 0.1, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [33, 1, 0.06, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n ])\n\n ## generator data\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf, start-up time, shut-down time and initial condition!\n ppc[\"gen\"] = array([\n [1, 
23.54, 0, 150, -20, 1, 100, 1, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1],\n ])\n\n ## branch data\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\n ppc[\"branch\"] = array([\n [1, 2, 0.057525912, 0.029324489, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [2, 3, 0.307595167, 0.15666764, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [3, 4, 0.228356656, 0.116299674, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [4, 5, 0.237777928, 0.121103899, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [5, 6, 0.510994811, 0.441115179, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [6, 7, 0.116798814, 0.386084969, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [7, 8, 0.44386045, 0.146684835, 0, 90, 90, 90, 0, 0, 1, -360, 360],\n [8, 9, 0.642643047, 0.461704714, 0, 70, 70, 70, 0, 0, 1, -360, 360],\n [9, 10, 0.651378001, 0.461704714, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [10, 11, 0.122663712, 0.040555144, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [11, 12, 0.233597628, 0.077241951, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [12, 13, 0.915922324, 0.720633708, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [13, 14, 0.337917936, 0.444796338, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [14, 15, 0.368739846, 0.328184702, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [15, 16, 0.465635443, 0.340039282, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [16, 17, 0.804239697, 1.073775422, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [17, 18, 0.456713311, 0.358133116, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [2, 19, 0.102323747, 0.097644308, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [19, 20, 0.938508419, 0.845668336, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [20, 21, 0.255497406, 0.298485858, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [21, 22, 0.442300637, 0.584805173, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [3, 23, 0.28151509, 0.192356167, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [23, 24, 0.560284909, 0.442425422, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [24, 25, 0.559037059, 0.43743402, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [6, 26, 0.126656834, 0.064513875, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [26, 27, 0.177319567, 0.090281989, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [27, 28, 0.660736881, 0.582559042, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [28, 29, 0.501760717, 0.437122057, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [29, 30, 0.316642084, 0.161284687, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [30, 31, 0.607952801, 0.600840053, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [31, 32, 0.193728802, 0.225798562, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [32, 33, 0.212758523, 0.330805188, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [7, 20, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [8, 14, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [11, 21, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [17, 32, 0.3120, 0.3120, 0, 65, 65, 65, 0, 0, 0, -360, 360],\n [24, 28, 0.3120, 0.3120, 0, 16, 16, 16, 0, 0, 0, -360, 360]\n ])\n\n ##----- OPF Data -----##\n ## area data\n # area refbus\n ppc[\"areas\"] = array([\n [1, 8],\n [2, 23],\n [3, 26],\n ])\n\n ## generator cost data\n # 1 startup shutdown n x1 y1 ... xn yn\n # 2 startup shutdown n c(n-1) ... 
c0\n ppc[\"gencost\"] = array([\n [0, 0, 0, 3, 0.0, 20, 0]\n ])\n\n return ppc" }, { "identifier": "micro_grid", "path": "TestCasesMicrogrids/test_cases/cases_unit_commitment.py", "snippet": "AC_PD = array([323.0284, 308.2374, 318.1886, 307.9809, 331.2170, 368.6539, 702.0040, 577.7045, 1180.4547, 1227.6240,\n 1282.9344, 1311.9738, 1268.9502, 1321.7436, 1323.9218, 1327.1464, 1386.9117, 1321.6387, 1132.0476,\n 1109.2701, 882.5698, 832.4520, 349.3568, 299.9920])\nDC_PD = array([287.7698, 287.7698, 287.7698, 287.7698, 299.9920, 349.3582, 774.4047, 664.0625, 1132.6996, 1107.7366,\n 1069.6837, 1068.9819, 1027.3295, 1096.3820, 1109.4778, 1110.7039, 1160.1270, 1078.7839, 852.2514,\n 791.5814, 575.4085, 551.1441, 349.3568, 299.992])\nDG = {\"PMIN\": 0,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST_A\": 0.01,\n \"COST_B\": 0.5}\nUG = {\"PMIN\": -5,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST\": Price_UG, } # The cost should be a profile\nESS = {\"PDC_MAX\": 5,\n \"PCH_MAX\": 5,\n \"EFF_DC\": 0.95,\n \"EFF_CH\": 0.95,\n \"E0\": 10,\n \"EMIN\": 5,\n \"EMAX\": 20, }\nBIC = {\"PMAX\": 5,\n \"QMAX\": 5,\n \"SMAX\": 5,\n \"EFF_AC2DC\": 0.9,\n \"EFF_DC2AC\": 0.9, }\nMG = {\"PMAX\": 5,\n \"PMIN\": -5,\n \"QMAX\": 5,\n \"QMIN\": -5\n }\nPD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5,\n \"DC\": DC_PD / max(DC_PD),\n \"DC_MAX\": 5}\nQD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5, }\nPV = {\"PMAX\": 0,\n \"COST\": 0}" }, { "identifier": "PBIC_AC2DC", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "PBIC_AC2DC = 4" }, { "identifier": "PG", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "PG = 0" }, { "identifier": "PESS_DC", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "PESS_DC = 8" }, { "identifier": "PBIC_DC2AC", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "PBIC_DC2AC = 5" }, { "identifier": "PUG", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "PUG = 2" }, { "identifier": "PESS_CH", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "PESS_CH = 7" }, { "identifier": "PMESS", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "PMESS = 10 # Reactive power unit commitment of" }, { "identifier": "EESS", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "EESS = 9" }, { "identifier": "NX_MG", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "NX_MG = 11" }, { "identifier": "QBIC", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "QBIC = 6" }, { "identifier": "QUG", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "QUG = 3" }, { "identifier": "QG", "path": "TestCaseDistributionSystems/data_format/idx_MG.py", "snippet": "QG = 1" }, { "identifier": "DataBaseManagement", "path": "TestCaseDistributionSystems/database_management.py", "snippet": "class DataBaseManagement():\n\n def __init__(self, host=\"localhost\", user=\"root\", password=\"Ntu@1003\", db=\"mess\"):\n \"\"\"\n Initialized the database connection string\n :param host: host ip\n :param user: user name\n :param password: password\n :param db: database name\n :return\n \"\"\"\n self.db = pymysql.connect(host=host, user=user, password=password, db=db)\n\n def create_table(self, table_name, nl=32, nb=33, ng=6, nmg=3, nmes=3):\n \"\"\"\n Creat table name\n :param table_name:\n :param nb:\n :param nb:\n :param ng:\n :return: no return value\n \"\"\"\n cursor = self.db.cursor()\n sql 
= \"DROP TABLE IF EXISTS \"\n cursor.execute(sql + table_name)\n if table_name == \"distribution_networks\":\n sql_start = \"\"\"CREATE TABLE distribution_networks (\"\"\"\n sql = 'SCENARIO INT,\\n TIME INT NOT NULL,\\n '\n for i in range(nl):\n sql += \"PIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"QIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"IIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nb):\n sql += \"V{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng):\n sql += \"PG{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng - 1):\n sql += \"QG{0} DECIMAL(8,6),\\n \".format(i)\n sql += \"QG{0} DECIMAL(8,6)\\n \".format(ng - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"micro_grids\":\n sql_start = \"\"\"CREATE TABLE micro_grids (\"\"\"\n sql = 'SCENARIO INT,\\n MG INT,\\n TIME INT,\\n '\n sql += 'PG DECIMAL(7,4),\\n QG DECIMAL(7,4),\\n PUG DECIMAL(7,4),\\n QUG DECIMAL(7,4),\\n '\n sql += 'PBIC_AC2DC DECIMAL(7,4),\\n PBIC_DC2AC DECIMAL(7,4),\\n QBIC DECIMAL(7,4),\\n PESS_CH DECIMAL(7,4),\\n '\n sql += 'PESS_DC DECIMAL(7,4),\\n EESS DECIMAL(7,4),\\n PMESS DECIMAL(7,4)'\n sql_end = \"\"\")\"\"\"\n elif table_name == \"mobile_energy_storage_systems\":\n sql_start = \"\"\"CREATE TABLE mobile_energy_storage_systems (\"\"\"\n sql = 'SCENARIO INT,\\n MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(7,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"EESS DECIMAL(7,4)\\n \"\n sql_end = \"\"\")\"\"\"\n elif table_name == \"first_stage_solutions\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE first_stage_solutions (\"\"\"\n sql = 'TIME INT,\\n'\n for i in range(ng):\n sql += \"PG{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"RG{0} DECIMAL(7,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PG_MG{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"RG_MG{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"IESS{0} INT,\\n \".format(i)\n sql += \"PESS_DC{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"PESS_CH{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"RESS{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"ESS{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"PG_MG{0} DECIMAL(7,4),\\n \".format(nmg - 1)\n sql += \"RG_MG{0} DECIMAL(7,4),\\n \".format(nmg - 1)\n sql += \"IESS{0} INT,\\n \".format(nmg - 1)\n sql += \"PESS_DC{0} DECIMAL(7,4),\\n \".format(nmg - 1)\n sql += \"PESS_CH{0} DECIMAL(7,4),\\n \".format(nmg - 1)\n sql += \"RESS{0} DECIMAL(7,4),\\n \".format(nmg - 1)\n sql += \"ESS{0} DECIMAL(7,4)\\n \".format(nmg - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"fisrt_stage_mess\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE fisrt_stage_mess (\"\"\"\n sql = 'MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"IDC_MG{0} INT,\\n \".format(i)\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(7,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(7,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"RMESS{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"MESS_F_STOP INT,\\n \"\n sql += \"MESS_T_STOP INT\\n \"\n sql_end = \"\"\")\"\"\"\n else:\n sql_start = \"\"\"CREATE TABLE scenarios (\"\"\"\n sql = 'SCENARIO INT,\\n WEIGHT DECIMAL(7,4),\\n TIME INT,\\n'\n for i in range(nb):\n sql += \"PD{0} DECIMAL(7,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PD_AC{0} DECIMAL(7,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PD_DC{0} DECIMAL(7,4),\\n \".format(i)\n sql += \"PD_DC{0} DECIMAL(7,4)\\n\".format(nmg 
- 1)\n sql_end = \"\"\")\"\"\"\n\n cursor.execute(sql_start + sql + sql_end)\n cursor.close()\n\n def insert_data_ds(self, table_name, nl=32, nb=33, ng=6, scenario=0, time=0, pij=0, qij=0, lij=0, vi=0, pg=0, qg=0):\n \"\"\"\n Insert data into table_name\n :param table_name:\n :param nl:\n :param nb:\n :param ng:\n :param pij:\n :param qij:\n :param lij:\n :param vi:\n :param pg:\n :param qg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,TIME,\"\n value = \"{0},{1},\".format(scenario, time)\n for i in range(nl):\n sql += \"PIJ{0},\".format(i)\n value += \"{0},\".format(pij[i])\n for i in range(nl):\n sql += \"QIJ{0},\".format(i)\n value += \"{0},\".format(qij[i])\n for i in range(nl):\n sql += \"IIJ{0},\".format(i)\n value += \"{0},\".format(lij[i])\n for i in range(nb):\n sql += \"V{0},\".format(i)\n value += \"{0},\".format(vi[i])\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n for i in range(ng - 1):\n sql += \"QG{0},\".format(i)\n value += \"{0},\".format(qg[i])\n sql += \"QG{0}\".format(ng - 1)\n value += \"{0}\".format(qg[ng - 1])\n\n sql += \") VALUES (\" + value + \")\"\n\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mg(self, table_name, scenario=0, time=0, mg=0, pg=0, qg=0, pug=0, qug=0, pbic_ac2dc=0, pbic_dc2ac=0,\n qbic=0, pess_ch=0, pess_dc=0, eess=0, pmess=0):\n \"\"\"\n insert microgrid data\n :param table_name:\n :param scenario:\n :param time:\n :param mg:\n :param pg:\n :param qg:\n :param pug:\n :param qug:\n :param pbic_ac2dc:\n :param pbic_dc2ac:\n :param qbic:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param pmess:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MG,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mg, time)\n sql += \"PG,QG,PUG,QUG,PBIC_AC2DC,PBIC_DC2AC,QBIC,PESS_CH,PESS_DC,EESS,PMESS\"\n value += \"{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10}\".format(pg, qg, pug, qug, pbic_ac2dc, pbic_dc2ac, qbic,\n pess_ch, pess_dc, eess, pmess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage_mess(self, table_name, time=0, mess=0, imess=[0, 0, 0], pmess_ch=[0, 0, 0],\n pmess_dc=[0, 0, 0], rmess=[0, 0, 0], mess_f_stop=0, mess_t_stop=0, nmg=3):\n \"\"\"\n insert mobile energy storage systems data in the first-stage\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"MESS,TIME,\"\n value = \"{0},{1},\".format(mess, time)\n for i in range(nmg):\n sql += \"IDC_MG{0},\".format(i)\n value += \"{0},\".format(imess[i])\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n for i in range(nmg):\n sql += \"RMESS{0},\".format(i)\n value += \"{0},\".format(rmess[i])\n sql += \"MESS_F_STOP,MESS_T_STOP\"\n value += \"{0},{1}\".format(mess_f_stop, mess_t_stop)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mess(self, table_name, scenario=0, time=0, mess=0, pmess_ch=[0, 0, 0], pmess_dc=[0, 0, 0],\n emess=0, nmg=3):\n \"\"\"\n insert mobile 
energy storage systems data\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MESS,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mess, time)\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n sql += \"EESS\"\n value += \"{0}\".format(emess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage(self, table_name, time=0, ng=2, nmg=2, pg=[0, 0], rg=[0, 0], pg_mg=[0, 0],\n rg_mg=[0, 0], iess=[0, 0], pess_dc=[0, 0], pess_ch=[0, 0], ress=[0, 0], ess=[0, 0]):\n \"\"\"\n insert scenario data\n :param table_name:\n :param scenario:\n :param weight:\n :param time:\n :param nb:\n :param nmg:\n :param pd:\n :param pd_ac:\n :param pd_dc:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"TIME,\"\n value = \"{0},\".format(time)\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n sql += \"RG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n value += \"{0},\".format(rg[i])\n if nmg > 1:\n for i in range(nmg - 1):\n sql += \"PG_MG{0},\".format(i)\n sql += \"RG_MG{0},\".format(i)\n sql += \"IESS{0},\".format(i)\n sql += \"PESS_DC{0},\".format(i)\n sql += \"PESS_CH{0},\".format(i)\n sql += \"RESS{0},\".format(i)\n sql += \"ESS{0},\".format(i)\n value += \"{0},\".format(pg_mg[i])\n value += \"{0},\".format(rg_mg[i])\n value += \"{0},\".format(iess[i])\n value += \"{0},\".format(pess_dc[i])\n value += \"{0},\".format(pess_ch[i])\n value += \"{0},\".format(ress[i])\n value += \"{0},\".format(ess[i])\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg[nmg - 1])\n value += \"{0},\".format(rg_mg[nmg - 1])\n value += \"{0},\".format(iess[nmg - 1])\n value += \"{0},\".format(pess_dc[nmg - 1])\n value += \"{0},\".format(pess_ch[nmg - 1])\n value += \"{0},\".format(ress[nmg - 1])\n value += \"{0}\".format(ess[nmg - 1])\n else:\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg)\n value += \"{0},\".format(rg_mg)\n value += \"{0},\".format(iess)\n value += \"{0},\".format(pess_dc)\n value += \"{0},\".format(pess_ch)\n value += \"{0},\".format(ress)\n value += \"{0}\".format(ess)\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_scenario(self, table_name, scenario=0, weight=0, time=0, nb=1, nmg=2, pd=[0, 0], pd_ac=[0, 0],\n pd_dc=[0, 0]):\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,WEIGHT,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, weight, time)\n for i in range(nb):\n sql += \"PD{0},\".format(i)\n value += \"{0},\".format(pd[i])\n for i in range(nmg):\n sql += 
\"PD_AC{0},\".format(i)\n value += \"{0},\".format(pd_ac[i])\n for i in range(nmg - 1):\n sql += \"PD_DC{0},\".format(i)\n value += \"{0},\".format(pd_dc[i])\n if nmg > 1:\n sql += \"PD_DC{0}\".format(nmg - 1)\n value += \"{0}\".format(pd_dc[nmg - 1])\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def inquery_data_scenario(self, table_name, scenario=0, time=0):\n cursor = self.db.cursor()\n # sql = \"SELECT * FROM \" + table_name + \" ;\"\n sql = \"SELECT * FROM \" + table_name + \" WHERE SCENARIO={0} AND TIME={1};\".format(scenario, time)\n cursor.execute(sql)\n data = cursor.fetchall()\n n_data = len(data[0])\n\n temp = []\n for i in range(n_data): temp.append(float(data[0][i]))\n\n cursor.close()\n return temp" }, { "identifier": "ScenarioReduction", "path": "StochasticOptimization/scenario_reduction.py", "snippet": "class ScenarioReduction():\n def __init__(self):\n self.name = \"Scenario reduction\"\n\n def run(self, scenario, weight, n_reduced, power):\n \"\"\"\n\n :param scenario: A fan scenario tree, when more stage are considered, some merge operation can be implemented\n :param weight: Weight of each scenario\n :param n_reduced: Number of scenarios needs to be reduced\n :param power: The power in the distance calculation\n :return:\n \"\"\"\n n_scenario = scenario.shape[0] # number of original scenarios\n c = zeros((n_scenario, n_scenario))\n # Calculate the c matrix\n for i in range(n_scenario):\n for j in range(n_scenario):\n c[i, j] = linalg.norm((scenario[i, :] - scenario[j, :]), 2)\n c[i, j] = max([1, linalg.norm(scenario[i, :], power - 1), linalg.norm(scenario[j, :], power - 1)]) * \\\n c[i, j]\n\n J = arange(n_scenario) # The original index range\n J_reduced = array([])\n # Implement the iteration\n for n in range(n_reduced): # find the minimal distance\n print(\"The reduction is in process {0}\".format(n))\n c_n = inf * ones(n_scenario)\n c_n[J] = 0\n for u in J:\n # Delete the i-th distance\n J_temp = delete(J, where(J == u))\n for k in J_temp:\n c_k_j = delete(c[int(k)], J_temp)\n c_n[int(u)] += weight[int(k)] * min(c_k_j)\n u_i = argmin(c_n)\n J_reduced = append(J_reduced, u_i)\n J = delete(J, where(J == u_i))\n # Optimal redistribution\n p_s = weight.copy()\n p_s[J_reduced.astype(int)] = 0\n\n for i in J_reduced:\n c_temp = c[int(i), :]\n c_temp[J_reduced.astype(int)] = inf\n index = argmin(c_temp)\n p_s[index] += weight[int(i)]\n\n scenario_reduced = scenario[J.astype(int), :]\n weight_reduced = p_s[J.astype(int)]\n\n return scenario_reduced, weight_reduced" } ]
from TestCaseDistributionSystems.test_cases import case33
from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid
from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION
from numpy import zeros, shape, ones, diag, concatenate, eye
from scipy.sparse import csr_matrix as sparse
from scipy.sparse import hstack, vstack, lil_matrix
from numpy import flatnonzero as find
from numpy import array, tile, arange, random
from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A
from pypower.idx_bus import PD, VMAX, VMIN, QD
from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN
from pypower.ext2int import ext2int
from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp
from Solvers.mixed_integer_solvers_cplex import mixed_integer_linear_programming as milp
from copy import deepcopy
from TestCaseDistributionSystems.data_format.idx_MG import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \
    PMESS, EESS, NX_MG, QBIC, QUG, QG
from TestCaseDistributionSystems.database_management import DataBaseManagement
from StochasticOptimization.scenario_reduction import ScenarioReduction
15,480
for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] # Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1 Ay2x[nmg * T + i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + QUG] = -1 Aeq_temp = hstack([Ax2y, Ay2x]) beq_temp = zeros(2 * nmg * T) Aeq_full = vstack([Aeq_full, Aeq_temp]) beq = concatenate([beq, beq_temp]) # III) Formulate the optimization problem for tess in the second stage optimization model_tess = {} for i in range(nmes): model_tess[i] = self.problem_formulation_tess_second_stage(mess=mess[i]) # III.1) Merge the models of mirogrids and distribution # Formulate the index nv_index_ev = zeros(1 + nmes).astype(int) neq_index_temp = zeros(1 + nmes).astype(int) nv_index_ev[0] = int(Aeq_full.shape[1]) neq_index_temp[0] = int(Aeq_full.shape[0]) for i in range(nmes): nv_index_ev[i + 1] = nv_index_ev[i] + len(model_tess[i]["c"]) neq_index_temp[i + 1] = neq_index_temp[i] + model_tess[i]["Aeq"].shape[0] Aeq = lil_matrix((int(neq_index_temp[-1]), int(nv_index_ev[-1]))) Aeq[0:int(neq_index_temp[0]), 0:int(nv_index_ev[0])] = Aeq_full for i in range(nmes): lb = concatenate([lb, model_tess[i]["lb"]]) ub = concatenate([ub, model_tess[i]["ub"]]) c = concatenate([c, model_tess[i]["c"]]) q = concatenate([q, model_tess[i]["q"]]) vtypes += model_tess[i]["vtypes"] beq = concatenate([beq, model_tess[i]["beq"]]) Aeq[neq_index_temp[i]:neq_index_temp[i + 1], nv_index_ev[i]:nv_index_ev[i + 1]] = model_tess[i]["Aeq"] # III.2) Coupling constraints between the microgrids and mobile energy storage systems # Additional equal constraints, nmg*T Aeq_temp = lil_matrix((nmg * T, 
nv_index_ev[-1])) beq_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Aeq_temp[i * T + t, nv_index[i] + t * NX_MG + PMESS] = 1 # TESSs injections to the MGs for j in range(nmes): Aeq_temp[i * T + t, nv_index_ev[j] + t * self.nb_tra_ele + i] = -1 # Discharging Aeq_temp[i * T + t, nv_index_ev[j] + self.nb_tra_ele * T + t * self.nb_tra_ele + i] = 1 # Sort by order Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate((beq, beq_temp)) nv_second_stage = nv_index_ev[-1] nv_first_stage = self.nv_first_stage self.nv_second_stage = nv_second_stage Qc = dict() # 4) Pij**2+Qij**2<=Vi*Iij for t in range(T): for i in range(nl): Qc[(T * nl + T * nmg) * index + t * nl + i] = [ [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl)], [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl)], [1, 1, -1 / 2, -1 / 2]] Rc = zeros(nl * T) # 5) (Pbic_ac2dc+Pbic_dc2ac)**2+Qbic**2<=Sbic**2 Rc_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Qc[(T * nl + T * nmg) * index + T * nl + T * i + t] = [ [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC),
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: [email protected] @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Stochastic optimal power flow with tess" def main(self, power_networks, micro_grids, profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formualtion(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = nineq_index[i] + 
model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {} db_management = DataBaseManagement() db_management.create_table(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng) db_management.create_table(table_name="micro_grids", nmg=self.nmg) db_management.create_table(table_name="mobile_energy_storage_systems", nmg=self.nmg) db_management.create_table(table_name="first_stage_solutions", nmg=self.nmg, ng=self.ng, nmes=self.nmes) db_management.create_table(table_name="fisrt_stage_mess", nmg=self.nmg) for t in range(T): db_management.insert_data_first_stage(table_name="first_stage_solutions", time=t, ng=self.ng, nmg=self.nmg, pg=sol_first_stage["pg"][:, t].tolist(), rg=sol_first_stage["rg"][:, t].tolist(), pg_mg=sol_first_stage["pg_mg"][:, t].tolist(), rg_mg=sol_first_stage["rg_mg"][:, t].tolist(), pess_ch=sol_first_stage["pess_ch"][:, t].tolist(), pess_dc=sol_first_stage["pess_dc"][:, t].tolist(), ress=sol_first_stage["ress"][:, t].tolist(), ess=sol_first_stage["eess"][:, t].tolist(), iess=sol_first_stage["iess"][:, t].tolist()) for i in range(nmes): for t in range(T): db_management.insert_data_first_stage_mess(table_name="fisrt_stage_mess", nmg=self.nmg, time=t, mess=i, imess=sol_first_stage["MESS"][i]["idc"][:, t].tolist(), rmess=sol_first_stage["MESS"][i]["rmess"][:, t].tolist(), pmess_ch= sol_first_stage["MESS"][i]["pmess_ch"][:, t].tolist(), pmess_dc= sol_first_stage["MESS"][i]["pmess_dc"][:, t].tolist(), mess_f_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][0], mess_t_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][1]) for i in range(ns): sol_second_stage_checked[i] = self.second_stage_solution_validation(sol_second_stage[i]) for i in range(ns): for t in range(T): 
db_management.insert_data_ds(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng, scenario=i, time=t, pij=sol_second_stage_checked[i]["DS"]["pij"][:, t].tolist(), qij=sol_second_stage_checked[i]["DS"]["qij"][:, t].tolist(), lij=sol_second_stage_checked[i]["DS"]["lij"][:, t].tolist(), vi=sol_second_stage_checked[i]["DS"]["vi"][:, t].tolist(), pg=sol_second_stage_checked[i]["DS"]["pg"][:, t].tolist(), qg=sol_second_stage_checked[i]["DS"]["qg"][:, t].tolist(), ) for i in range(ns): for j in range(nmg): for t in range(T): db_management.insert_data_mg(table_name="micro_grids", scenario=i, time=t, mg=j, pg=sol_second_stage_checked[i]["MG"]["pg"][j, t], qg=sol_second_stage_checked[i]["MG"]["qg"][j, t], pug=sol_second_stage_checked[i]["MG"]["pug"][j, t], qug=sol_second_stage_checked[i]["MG"]["qug"][j, t], pbic_ac2dc=sol_second_stage_checked[i]["MG"]["pbic_ac2dc"][j, t], pbic_dc2ac=sol_second_stage_checked[i]["MG"]["pbic_dc2ac"][j, t], qbic=sol_second_stage_checked[i]["MG"]["qbic"][j, t], pess_ch=sol_second_stage_checked[i]["MG"]["pess_ch"][j, t], pess_dc=sol_second_stage_checked[i]["MG"]["pess_dc"][j, t], eess=sol_second_stage_checked[i]["MG"]["eess"][j, t], pmess=sol_second_stage_checked[i]["MG"]["pmess"][j, t]) for i in range(ns): for j in range(nmes): for t in range(T): db_management.insert_data_mess(table_name="mobile_energy_storage_systems", scenario=i, time=t, mess=j, nmg=self.nmg, pmess_dc= sol_second_stage_checked[i]["MESS"][j]["pmess_dc"][:, t].tolist(), pmess_ch= sol_second_stage_checked[i]["MESS"][j]["pmess_ch"][:, t].tolist(), emess=sol_second_stage_checked[i]["MESS"][j]["emess"][0, t]) # 4.3) Cross validation of the first-stage and second-stage decision variables tess_check = {} for i in range(ns): tess_temp = {} for j in range(nmes): tess_temp[j] = sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["pmess_dc"] + \ sol_first_stage["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["rmess"] tess_temp[j + nmes] = sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["pmess_ch"] + \ sol_first_stage["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["rmess"] tess_check[i] = tess_temp # return sol_distribution_network, sol_microgrids, sol_tess return sol_first_stage, sol_second_stage_checked def first_stage_problem_formualtion(self, pns, mgs, mess, tns): """ Problem formulation for the first stage optimization, Decision variables include, DGs within power networks, DGs within MGs, EESs within MGs and TESSs :param power_networks: Parameters for the power networks :param micro_grids: Parameters for the microgrids :param tess: Parameters for the mobile energy storage systems :param traffic_networks: Parameters for the transportation networks :return: Formulated first-stage problem """ T = self.T # Time slots nmg = self.nmg # Number of mgs nmes = self.nmes # Number of tess mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] ng = shape(mpc['gen'])[0] ## number of dispatchable injections nb = shape(mpc["bus"])[0] self.nb = nb self.ng = ng # Obtain the initial status, start-up and shut down of generators Ig0 = gen[:, -1].astype(int) MIN_DOWN = gen[:, -2].astype(int) MIN_UP = gen[:, -3].astype(int) alpha_l = zeros(ng) beta_l = zeros(ng) Ig_l = zeros(ng) pg_l = zeros(ng) # Boundary for DGs within distribution networks rg_l = zeros(ng) 
alpha_u = ones(ng) beta_u = ones(ng) Ig_u = ones(ng) pg_u = gen[:, PMAX] / baseMVA rg_u = gen[:, PMAX] / baseMVA c_alpha = gencost[:, 0] c_beta = gencost[:, 1] c_ig = gencost[:, 6] cg = gencost[:, 5] * baseMVA cr = zeros(ng) pg_mg_l = zeros(nmg) # Boundary for DGs within MGs rg_mg_l = zeros(nmg) pg_mg_u = zeros(nmg) rg_mg_u = zeros(nmg) cg_mg = zeros(nmg) cr_mg = zeros(nmg) for i in range(nmg): pg_mg_l[i] = mgs[i]["DG"]["PMIN"] pg_mg_u[i] = mgs[i]["DG"]["PMAX"] rg_mg_u[i] = mgs[i]["DG"]["PMAX"] cg_mg[i] = mgs[i]["DG"]["COST_B"] pes_ch_l = zeros(nmg) # Lower boundary for ESSs within MGs pes_dc_l = zeros(nmg) ees_l = zeros(nmg) res_l = zeros(nmg) ies_l = zeros(nmg) pes_ch_u = zeros(nmg) # Upper boundary for ESSs within MGs pes_dc_u = zeros(nmg) ees_u = zeros(nmg) res_u = zeros(nmg) ies_u = ones(nmg) ces_ch = zeros(nmg) # Cost boundary for ESSs within MGs ces_dc = zeros(nmg) ces_r = zeros(nmg) ces = zeros(nmg) ces_i = zeros(nmg) for i in range(nmg): pes_ch_u[i] = mgs[i]["ESS"]["PCH_MAX"] pes_dc_u[i] = mgs[i]["ESS"]["PDC_MAX"] + mgs[i]["ESS"]["PCH_MAX"] res_u[i] = mgs[i]["ESS"]["PCH_MAX"] ees_l[i] = mgs[i]["ESS"]["EMIN"] ees_u[i] = mgs[i]["ESS"]["EMAX"] _nv_first_stage = ng * 5 + nmg * 2 + nmg * 5 nv_first_stage = _nv_first_stage * T # Formulate the boundaries lb = concatenate( [tile(concatenate( [alpha_l, beta_l, Ig_l, pg_l, rg_l, pg_mg_l, rg_mg_l, pes_ch_l, pes_dc_l, res_l, ees_l, ies_l]), T)]) ub = concatenate( [tile(concatenate( [alpha_u, beta_u, Ig_u, pg_u, rg_u, pg_mg_u, rg_mg_u, pes_ch_u, pes_dc_u, res_u, ees_u, ies_u]), T)]) # Objective value c = concatenate( [tile(concatenate([c_alpha, c_beta, c_ig, cg, cr, cg_mg, cr_mg, ces_ch, ces_dc, ces, ces_r, ces_i]), T)]) # Variable types vtypes = (["b"] * ng * 3 + ["c"] * (ng * 2 + nmg * 2 + nmg * 4) + ["b"] * nmg) * T ## Constraint sets # 1) Pg+Rg<=PguIg A = lil_matrix((ng * T, nv_first_stage)) b = zeros(ng * T) for t in range(T): for j in range(ng): A[t * ng + j, t * _nv_first_stage + ng * 3 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 2 + j] = -pg_u[j] # 2) Pg-Rg>=IgPgl A_temp = lil_matrix((ng * T, nv_first_stage)) b_temp = zeros(ng * T) for t in range(T): for j in range(ng): A_temp[t * ng + j, t * _nv_first_stage + ng * 3 + j] = -1 A_temp[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A_temp[t * ng + j, t * _nv_first_stage + j] = pg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 3) Start-up and shut-down constraints of DGs UP_LIMIT = zeros(ng).astype(int) DOWN_LIMIT = zeros(ng).astype(int) for i in range(ng): UP_LIMIT[i] = T - MIN_UP[i] DOWN_LIMIT[i] = T - MIN_DOWN[i] # 3.1) Up limit A_temp = lil_matrix((sum(UP_LIMIT), nv_first_stage)) b_temp = zeros(sum(UP_LIMIT)) for i in range(ng): for t in range(MIN_UP[i], T): for k in range(t - MIN_UP[i], t): A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], k * _nv_first_stage + i] = 1 A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], t * _nv_first_stage + ng * 2 + i] = -1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # # 3.2) Down limit A_temp = lil_matrix((sum(DOWN_LIMIT), nv_first_stage)) b_temp = ones(sum(DOWN_LIMIT)) for i in range(ng): for t in range(MIN_DOWN[i], T): for k in range(t - MIN_DOWN[i], t): A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], k * _nv_first_stage + ng + i] = 1 A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], t * _nv_first_stage + ng * 2 + i] = 1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Status transformation of each unit Aeq = lil_matrix((T * ng, nv_first_stage)) beq = zeros(T * ng) for i in 
range(ng): for t in range(T): Aeq[i * T + t, t * _nv_first_stage + i] = 1 Aeq[i * T + t, t * _nv_first_stage + ng + i] = -1 Aeq[i * T + t, t * _nv_first_stage + ng * 2 + i] = -1 if t != 0: Aeq[i * T + t, (t - 1) * _nv_first_stage + ng * 2 + i] = 1 else: beq[i * T + t] = -Ig0[i] # 3) Pg_mg+Rg_mg<=Pg_mg_u A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Pg_mg-Rg_mg<=Pg_mg_l A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 5) Pess_dc-Pess_ch+Ress<=Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 6) Pess_ch-Pess_dc+Ress<=Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, ng * 5 + nmg * 2 + t] = 1 A_temp[t * nmg + j, ng * 5 + nmg * 2 + nmg + t] = -1 A_temp[t * nmg + j, ng * 5 + nmg * 2 + nmg * 2 + t] = 1 b_temp[t * nmg + j] = pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 7) Energy storage balance equation Aeq_temp = lil_matrix((T * nmg, nv_first_stage)) beq_temp = zeros(T * nmg) for t in range(T): for j in range(nmg): Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = 1 Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -mgs[j]["ESS"]["EFF_CH"] Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 / mgs[j]["ESS"]["EFF_DC"] if t == 0: beq_temp[i * nmg + j] = mgs[j]["ESS"]["E0"] else: Aeq_temp[i * nmg + j, (i - 1) * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = -1 Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 8) Pess_ch<=I*Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = -pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 9) Pess_dc<=(1-I)*Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = pes_dc_u[j] b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 2) Transportation energy storage systems problem model_mess = {} for i in range(nmes): model_mess[i] = self.problem_formulation_tess(mess=mess[i], tns=tns) # 3) Merge the DGs, ESSs and TESSs neq = Aeq.shape[0] nineq = A.shape[0] nV_index = zeros(nmes + 1).astype(int) neq_index = zeros(nmes + 1).astype(int) nineq_index = zeros(nmes + 1).astype(int) 
nV_index[0] = nv_first_stage neq_index[0] = neq nineq_index[0] = nineq for i in range(nmes): nV_index[i + 1] = nV_index[i] + len(model_mess[i]["c"]) neq_index[i + 1] = neq_index[i] + model_mess[i]["Aeq"].shape[0] nineq_index[i + 1] = nineq_index[i] + model_mess[i]["A"].shape[0] neq += model_mess[i]["Aeq"].shape[0] nineq += model_mess[i]["A"].shape[0] # Merge the objective function, boundaries, types and rhs c = concatenate([c, model_mess[i]["c"]]) lb = concatenate([lb, model_mess[i]["lb"]]) ub = concatenate([ub, model_mess[i]["ub"]]) vtypes += model_mess[i]["vtypes"] beq = concatenate([beq, model_mess[i]["beq"]]) b = concatenate([b, model_mess[i]["b"]]) A_full = lil_matrix((nineq_index[-1], nV_index[-1])) Aeq_full = lil_matrix((neq_index[-1], nV_index[-1])) if Aeq is not None: Aeq_full[0:int(neq_index[0]), 0:int(nV_index[0])] = Aeq if A is not None: A_full[0:int(nineq_index[0]), 0:int(nV_index[0])] = A for i in range(nmes): Aeq_full[neq_index[i]:neq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["Aeq"] A_full[nineq_index[i]:nineq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["A"] self.nv_first_stage = nV_index[-1] # The number of first stage decision variables self._nv_first_stage = _nv_first_stage model_first_stage = {"c": c, "lb": lb, "ub": ub, "vtypes": vtypes, "A": A_full, "b": b, "Aeq": Aeq_full, "beq": beq, } return model_first_stage def first_stage_solution_validation(self, sol): """ Validation of the first-stage solution :param sol: The first stage solution :return: the first stage solution """ T = self.T ng = self.ng nmg = self.nmg nmes = self.nmes # Set-points of DGs within DSs, MGs and ESSs _nv_first_stage = self._nv_first_stage alpha = zeros((ng, T)) beta = zeros((ng, T)) Ig = zeros((ng, T)) Pg = zeros((ng, T)) Rg = zeros((ng, T)) Pg_mg = zeros((nmg, T)) Rg_mg = zeros((nmg, T)) Pess_dc = zeros((nmg, T)) Pess_ch = zeros((nmg, T)) Ress = zeros((nmg, T)) Eess = zeros((nmg, T)) Iess = zeros((nmg, T)) for i in range(T): alpha[:, i] = sol[_nv_first_stage * i:_nv_first_stage * i + ng] beta[:, i] = sol[_nv_first_stage * i + ng:_nv_first_stage * i + ng * 2] Ig[:, i] = sol[_nv_first_stage * i + ng * 2:_nv_first_stage * i + ng * 3] Pg[:, i] = sol[_nv_first_stage * i + ng * 3:_nv_first_stage * i + ng * 4] Rg[:, i] = sol[_nv_first_stage * i + ng * 4:_nv_first_stage * i + ng * 5] Pg_mg[:, i] = sol[_nv_first_stage * i + ng * 5:_nv_first_stage * i + ng * 5 + nmg] Rg_mg[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg:_nv_first_stage * i + ng * 5 + nmg * 2] Pess_ch[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 2:_nv_first_stage * i + ng * 5 + nmg * 3] Pess_dc[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 3:_nv_first_stage * i + ng * 5 + nmg * 4] Ress[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 4:_nv_first_stage * i + ng * 5 + nmg * 5] Eess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 5:_nv_first_stage * i + ng * 5 + nmg * 6] Iess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 6:_nv_first_stage * i + ng * 5 + nmg * 7] # Set-points and scheduling of mobile energy storage systems nv_tra = self.nv_tra nl_traffic = self.nl_tra n_stops = self.n_stops nb_tra_ele = self.nb_tra_ele sol_ev = {} for i in range(nmes): ev_temp = {} ev_temp["VRP"] = [] for t in range(nl_traffic): if sol[_nv_first_stage * T + nv_tra * i + t] > 0: # obtain the solution for vrp if self.connection_matrix[t, TIME] > 0: for j in range(int(self.connection_matrix[t, TIME])): ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) 
else: ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) ev_temp["idc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_dc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_ch"] = zeros((nb_tra_ele, T)) ev_temp["rmess"] = zeros((nb_tra_ele, T)) for t in range(T): for k in range(nb_tra_ele): ev_temp["idc"][k, t] = sol[_nv_first_stage * T + nv_tra * i + nl_traffic + nb_tra_ele * t + k] ev_temp["pmess_dc"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops + nb_tra_ele * t + k] ev_temp["pmess_ch"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 2 + nb_tra_ele * t + k] ev_temp["rmess"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 3 + nb_tra_ele * t + k] sol_ev[i] = ev_temp sol_first_stage = {"alpha": alpha, "beta": beta, "ig": Ig, "rg": Rg, "pg": Pg, "pg_mg": Pg_mg, "rg_mg": Rg_mg, "pess_ch": Pess_ch, "pess_dc": Pess_dc, "ress": Ress, "eess": Eess, "iess": Iess, "MESS": sol_ev, } return sol_first_stage def second_stage_problem_formualtion(self, pns, mgs, mess, tns, profile, index=0, weight=1): """ Second-stage problem formulation, the decision variables includes DGs within power networks, DGs within MGs, EESs within MGs and TESSs and other systems' information :param power_networks: :param micro_grids: :param tess: :param traffic_networks: :return: The second stage problems as list, including coupling constraints, and other constraint set """ # I) Formulate the problem for distribution systems operator T = self.T mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] nb = shape(mpc['bus'])[0] ## number of buses nl = shape(mpc['branch'])[0] ## number of branches ng = shape(mpc['gen'])[0] ## number of dispatchable injections nmg = self.nmg nmes = self.nmes self.nl = nl self.nb = nb self.ng = ng m = zeros(nmg) ## list of integration index pmg_l = zeros(nmg) ## list of lower boundary pmg_u = zeros(nmg) ## list of upper boundary qmg_l = zeros(nmg) ## list of lower boundary qmg_u = zeros(nmg) ## list of upper boundary for i in range(nmg): m[i] = mgs[i]["BUS"] pmg_l[i] = mgs[i]["UG"]["PMIN"] / 1000 / baseMVA pmg_u[i] = mgs[i]["UG"]["PMAX"] / 1000 / baseMVA qmg_l[i] = mgs[i]["UG"]["QMIN"] / 1000 / baseMVA qmg_u[i] = mgs[i]["UG"]["QMAX"] / 1000 / baseMVA f = branch[:, F_BUS] ## list of "from" buses t = branch[:, T_BUS] ## list of "to" buses i = range(nl) ## double set of row indices self.f = f ## record from bus for each branch # Connection matrix Cf = sparse((ones(nl), (i, f)), (nl, nb)) Ct = sparse((ones(nl), (i, t)), (nl, nb)) Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng)) Cmg = sparse((ones(nmg), (m, range(nmg))), (nb, nmg)) Branch_R = branch[:, BR_R] Branch_X = branch[:, BR_X] Cf = Cf.T Ct = Ct.T # Obtain the boundary information slmax = branch[:, RATE_A] / baseMVA pij_l = -slmax qij_l = -slmax lij_l = zeros(nl) vm_l = bus[:, VMIN] ** 2 pg_l = gen[:, PMIN] / baseMVA qg_l = gen[:, QMIN] / baseMVA pij_u = slmax qij_u = slmax lij_u = slmax vm_u = bus[:, VMAX] ** 2 pg_u = 2 * gen[:, PMAX] / baseMVA qg_u = 2 * gen[:, QMAX] / baseMVA _nv_second_stage = int(3 * nl + nb + 2 * ng + 2 * nmg) self._nv_second_stage = _nv_second_stage # Number of decision variable within each time slot lb = concatenate([tile(concatenate([pij_l, qij_l, lij_l, vm_l, pg_l, qg_l, pmg_l, qmg_l]), T)]) ub = concatenate([tile(concatenate([pij_u, qij_u, lij_u, vm_u, pg_u, qg_u, pmg_u, qmg_u]), T)]) vtypes = ["c"] * _nv_second_stage 
* T nv_ds = _nv_second_stage * T # Number of total decision variables # Add system level constraints # 1) Active power balance Aeq_p = lil_matrix((nb * T, nv_ds)) beq_p = zeros(nb * T) for i in range(T): Aeq_p[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng)), -Cmg, zeros((nb, nmg))]) beq_p[i * nb:(i + 1) * nb] = profile[i * nb:(i + 1) * nb] / baseMVA # 2) Reactive power balance Aeq_q = lil_matrix((nb * T, nv_ds)) beq_q = zeros(nb * T) for i in range(T): Aeq_q[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg, zeros((nb, nmg)), -Cmg]) for j in range(nb): if bus[j, PD] > 0: beq_q[i * nb:(i + 1) * nb] = profile[i * nb + j] / bus[j, PD] * bus[j, QD] / baseMVA # 3) KVL equation Aeq_kvl = lil_matrix((nl * T, nv_ds)) beq_kvl = zeros(nl * T) for i in range(T): Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage: i * _nv_second_stage + nl] = -2 * diag(Branch_R) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + nl: i * _nv_second_stage + 2 * nl] = -2 * diag(Branch_X) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 2 * nl: i * _nv_second_stage + 3 * nl] = diag( Branch_R ** 2) + diag(Branch_X ** 2) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 3 * nl:i * _nv_second_stage + 3 * nl + nb] = ( Cf.T - Ct.T).toarray() Aeq = vstack([Aeq_p, Aeq_q, Aeq_kvl]) beq = concatenate([beq_p, beq_q, beq_kvl]) c = zeros(nv_ds) q = zeros(nv_ds) c0 = 0 for t in range(T): for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] # Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - 
nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1 Ay2x[nmg * T + i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + QUG] = -1 Aeq_temp = hstack([Ax2y, Ay2x]) beq_temp = zeros(2 * nmg * T) Aeq_full = vstack([Aeq_full, Aeq_temp]) beq = concatenate([beq, beq_temp]) # III) Formulate the optimization problem for tess in the second stage optimization model_tess = {} for i in range(nmes): model_tess[i] = self.problem_formulation_tess_second_stage(mess=mess[i]) # III.1) Merge the models of mirogrids and distribution # Formulate the index nv_index_ev = zeros(1 + nmes).astype(int) neq_index_temp = zeros(1 + nmes).astype(int) nv_index_ev[0] = int(Aeq_full.shape[1]) neq_index_temp[0] = int(Aeq_full.shape[0]) for i in range(nmes): nv_index_ev[i + 1] = nv_index_ev[i] + len(model_tess[i]["c"]) neq_index_temp[i + 1] = neq_index_temp[i] + model_tess[i]["Aeq"].shape[0] Aeq = lil_matrix((int(neq_index_temp[-1]), int(nv_index_ev[-1]))) Aeq[0:int(neq_index_temp[0]), 0:int(nv_index_ev[0])] = Aeq_full for i in range(nmes): lb = concatenate([lb, model_tess[i]["lb"]]) ub = concatenate([ub, model_tess[i]["ub"]]) c = concatenate([c, model_tess[i]["c"]]) q = concatenate([q, model_tess[i]["q"]]) vtypes += model_tess[i]["vtypes"] beq = concatenate([beq, model_tess[i]["beq"]]) Aeq[neq_index_temp[i]:neq_index_temp[i + 1], nv_index_ev[i]:nv_index_ev[i + 1]] = model_tess[i]["Aeq"] # III.2) Coupling constraints between the microgrids and mobile energy storage systems # Additional equal constraints, nmg*T Aeq_temp = lil_matrix((nmg * T, nv_index_ev[-1])) beq_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Aeq_temp[i * T + t, nv_index[i] + t * NX_MG + PMESS] = 1 # TESSs injections to the MGs for j in range(nmes): Aeq_temp[i * T + t, nv_index_ev[j] + t * self.nb_tra_ele + i] = -1 # Discharging Aeq_temp[i * T + t, nv_index_ev[j] + self.nb_tra_ele * T + t * self.nb_tra_ele + i] = 1 # Sort by order Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate((beq, beq_temp)) nv_second_stage = nv_index_ev[-1] nv_first_stage = self.nv_first_stage self.nv_second_stage = nv_second_stage Qc = dict() # 4) Pij**2+Qij**2<=Vi*Iij for t in range(T): for i in range(nl): Qc[(T * nl + T * nmg) * index + t * nl + i] = [ [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl)], [int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + f[i] + 3 * nl), int(nv_first_stage + index * nv_second_stage + t * _nv_second_stage + i + 2 * nl)], [1, 1, -1 / 2, -1 / 2]] Rc = zeros(nl * T) # 5) (Pbic_ac2dc+Pbic_dc2ac)**2+Qbic**2<=Sbic**2 Rc_temp = zeros(nmg * T) for i in range(nmg): for t in range(T): Qc[(T * nl + T * nmg) * index + T * nl + T * i + t] = [ [int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_AC2DC),
int(nv_first_stage + index * nv_second_stage + nv_ds + NX_MG * T * i + NX_MG * t + PBIC_DC2AC),
5
2023-11-27 15:57:53+00:00
24k
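The Qc dictionary built in the preceding record encodes, for every branch and time slot, a quadratic constraint of the form Pij^2 + Qij^2 <= v_i * l_ij as parallel row/column/coefficient lists. Below is a minimal sketch of how such an index/coefficient triplet can be evaluated against a flat solution vector; every name in it is illustrative and none of it is taken from the repo.

import numpy as np

def check_quadratic_constraint(sol, rows, cols, coeffs, rhs=0.0, tol=1e-6):
    # Evaluate sum_k coeffs[k] * sol[rows[k]] * sol[cols[k]] <= rhs.
    # This mirrors the [rows, cols, coeffs] layout of the Qc entries above:
    # coefficient 1 pairs an index with itself (Pij^2, Qij^2), and the two
    # -1/2 terms pair l_ij with v_i so together they contribute -v_i * l_ij.
    value = sum(c * sol[r] * sol[j] for r, j, c in zip(rows, cols, coeffs))
    return value <= rhs + tol

# Hypothetical 4-variable slice ordered [Pij, Qij, lij, vi].
sol = np.array([0.3, 0.4, 0.5, 0.5])
rows = [0, 1, 2, 3]
cols = [0, 1, 3, 2]
coeffs = [1.0, 1.0, -0.5, -0.5]
print(check_quadratic_constraint(sol, rows, cols, coeffs))  # True: 0.09 + 0.16 <= 0.25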
girgle/DouZero_For_New_HLDDZ
GOOD.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n self.Interrupt = False\n self.RealRate = (1440, 810)\n self.GetZoomRate()\n for file in os.listdir(\"./pics\"):\n info = file.split(\".\")\n if info[1] == \"png\":\n tmpImage = Image.open(\"./pics/\" + file)\n imgCv = cv2.imread(\"./pics/\" + file)\n self.Pics.update({info[0]: tmpImage})\n self.PicsCV.update({info[0]: imgCv})\n\n def sleep(self, ms):\n self.counter.restart()\n while self.counter.elapsed() < ms:\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 50)\n\n def Screenshot(self, region=None): # -> (im, (left, top))\n try_count = 3\n success = False\n while try_count > 0 and not success:\n try:\n try_count -= 1\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n hwnd = self.Handle\n left, top, right, bot = win32gui.GetWindowRect(hwnd)\n width = right - left\n height = bot - top\n self.RealRate = (width, height)\n width = int(width)\n height = int(height)\n hwndDC = win32gui.GetWindowDC(hwnd)\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)\n saveDC.SelectObject(saveBitMap)\n result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 3)\n bmpinfo = saveBitMap.GetInfo()\n bmpstr = saveBitMap.GetBitmapBits(True)\n im = Image.frombuffer(\n \"RGB\",\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\n bmpstr, 'raw', 'BGRX', 0, 1)\n win32gui.DeleteObject(saveBitMap.GetHandle())\n saveDC.DeleteDC()\n mfcDC.DeleteDC()\n win32gui.ReleaseDC(hwnd, hwndDC)\n im = im.resize((1440, 810))\n if region is not None:\n im = im.crop((region[0], region[1], region[0] + region[2], region[1] + region[3]))\n if result:\n success = True\n return im, (left, top)\n except Exception as e:\n print(\"截图时出现错误:\", repr(e))\n self.sleep(200)\n return None, (0, 0)\n\n def GetZoomRate(self):\n self.ScreenZoomRate = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100\n\n def LocateOnScreen(self, templateName, region, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n return LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n def ClickOnImage(self, templateName, region=None, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n result = LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n if result is not None:\n self.LeftClick(result)\n print(result)\n\n def LeftClick(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')\n time.sleep(0.1)\n pyautogui.moveTo(int(left + 1000), int(top + 550))\n\n '''win32gui.SetActiveWindow(self.Handle)\n lParam = win32api.MAKELONG(x, y)\n\n win32gui.PostMessage(self.Handle, WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, 
WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, WM_MOUSEMOVE, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONDOWN, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONUP, MK_LBUTTON, lParam)'''\n\n def LeftClick2(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')" }, { "identifier": "get_move_type", "path": "douzero/env/move_detector.py", "snippet": "def get_move_type(move):\n move_size = len(move)\n move_dict = collections.Counter(move)\n\n if move_size == 0:\n return {'type': TYPE_0_PASS}\n\n if move_size == 1:\n return {'type': TYPE_1_SINGLE, 'rank': move[0]}\n\n if move_size == 2:\n if move[0] == move[1]:\n return {'type': TYPE_2_PAIR, 'rank': move[0]}\n elif move == [20, 30]: # Kings\n return {'type': TYPE_5_KING_BOMB}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 3:\n if len(move_dict) == 1:\n return {'type': TYPE_3_TRIPLE, 'rank': move[0]}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 4:\n if len(move_dict) == 1:\n return {'type': TYPE_4_BOMB, 'rank': move[0]}\n elif len(move_dict) == 2:\n if move[0] == move[1] == move[2] or move[1] == move[2] == move[3]:\n return {'type': TYPE_6_3_1, 'rank': move[1]}\n else:\n return {'type': TYPE_15_WRONG}\n else:\n return {'type': TYPE_15_WRONG}\n\n if is_continuous_seq(move):\n return {'type': TYPE_8_SERIAL_SINGLE, 'rank': move[0], 'len': len(move)}\n\n if move_size == 5:\n if len(move_dict) == 2:\n return {'type': TYPE_7_3_2, 'rank': move[2]}\n else:\n return {'type': TYPE_15_WRONG}\n\n count_dict = collections.defaultdict(int)\n for c, n in move_dict.items():\n count_dict[n] += 1\n\n if move_size == 6:\n if (len(move_dict) == 2 or len(move_dict) == 3) and count_dict.get(4) == 1 and \\\n (count_dict.get(2) == 1 or count_dict.get(1) == 2):\n return {'type': TYPE_13_4_2, 'rank': move[2]}\n\n if move_size == 8 and (((len(move_dict) == 3 or len(move_dict) == 2) and\n (count_dict.get(4) == 1 and count_dict.get(2) == 2)) or count_dict.get(4) == 2):\n return {'type': TYPE_14_4_22, 'rank': max([c for c, n in move_dict.items() if n == 4])}\n\n mdkeys = sorted(move_dict.keys())\n if len(move_dict) == count_dict.get(2) and is_continuous_seq(mdkeys):\n return {'type': TYPE_9_SERIAL_PAIR, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n if len(move_dict) == count_dict.get(3) and is_continuous_seq(mdkeys):\n return {'type': TYPE_10_SERIAL_TRIPLE, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n # Check Type 11 (serial 3+1) and Type 12 (serial 3+2)\n if count_dict.get(3, 0) >= MIN_TRIPLES:\n serial_3 = list()\n single = list()\n pair = list()\n\n for k, v in move_dict.items():\n if v == 3:\n serial_3.append(k)\n elif v == 1:\n single.append(k)\n elif v == 2:\n pair.append(k)\n else: # no other possibilities\n return {'type': TYPE_15_WRONG}\n\n serial_3.sort()\n if is_continuous_seq(serial_3):\n if len(serial_3) == len(single)+len(pair)*2:\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3)}\n if len(serial_3) == len(pair) and len(move_dict) == len(serial_3) * 2:\n return {'type': TYPE_12_SERIAL_3_2, 'rank': serial_3[0], 'len': len(serial_3)}\n\n if len(serial_3) == 4:\n if is_continuous_seq(serial_3[1:]):\n return {'type': 
TYPE_11_SERIAL_3_1, 'rank': serial_3[1], 'len': len(serial_3) - 1}\n if is_continuous_seq(serial_3[:-1]):\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3) - 1}\n\n return {'type': TYPE_15_WRONG}" }, { "identifier": "Ui_Form", "path": "MainWindow.py", "snippet": "class Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(677, 450)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(9)\n font.setBold(True)\n font.setItalic(False)\n font.setWeight(75)\n Form.setFont(font)\n Form.setWindowOpacity(0.8)\n self.WinRate = QtWidgets.QLabel(Form)\n self.WinRate.setGeometry(QtCore.QRect(320, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.WinRate.setFont(font)\n self.WinRate.setAlignment(QtCore.Qt.AlignCenter)\n self.WinRate.setObjectName(\"WinRate\")\n self.UserHandCards = QtWidgets.QLabel(Form)\n self.UserHandCards.setGeometry(QtCore.QRect(30, 330, 351, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.UserHandCards.setFont(font)\n self.UserHandCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.UserHandCards.setObjectName(\"UserHandCards\")\n self.ThreeLandlordCards = QtWidgets.QLabel(Form)\n self.ThreeLandlordCards.setGeometry(QtCore.QRect(30, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.ThreeLandlordCards.setFont(font)\n self.ThreeLandlordCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.ThreeLandlordCards.setObjectName(\"ThreeLandlordCards\")\n self.BidWinrate = QtWidgets.QLabel(Form)\n self.BidWinrate.setGeometry(QtCore.QRect(30, 220, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.BidWinrate.setFont(font)\n self.BidWinrate.setObjectName(\"BidWinrate\")\n self.PreWinrate = QtWidgets.QLabel(Form)\n self.PreWinrate.setGeometry(QtCore.QRect(30, 280, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PreWinrate.setFont(font)\n self.PreWinrate.setObjectName(\"PreWinrate\")\n self.label = QtWidgets.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(490, 320, 101, 41))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.LPlayedCard = QtWidgets.QLabel(Form)\n self.LPlayedCard.setGeometry(QtCore.QRect(170, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LPlayedCard.setFont(font)\n self.LPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.LPlayedCard.setObjectName(\"LPlayedCard\")\n self.splitter_2 = QtWidgets.QSplitter(Form)\n self.splitter_2.setGeometry(QtCore.QRect(20, 380, 621, 41))\n self.splitter_2.setOrientation(QtCore.Qt.Horizontal)\n self.splitter_2.setObjectName(\"splitter_2\")\n self.SingleButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n 
font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.SingleButton.setFont(font)\n self.SingleButton.setObjectName(\"SingleButton\")\n self.LoopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LoopButton.setFont(font)\n self.LoopButton.setObjectName(\"LoopButton\")\n self.StopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.StopButton.setFont(font)\n self.StopButton.setObjectName(\"StopButton\")\n self.tableWidget = QtWidgets.QTableWidget(Form)\n self.tableWidget.setGeometry(QtCore.QRect(20, 10, 611, 75))\n self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 75))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(12)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.tableWidget.setFont(font)\n self.tableWidget.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.tableWidget.setStyleSheet(\"QTableWidget{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:#444444;\\n\"\n\"border:1px solid #242424;\\n\"\n\"alternate-background-color:#525252;\\n\"\n\"gridline-color:#242424;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:selected{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:qlineargradient(spread:pad,x1:0,y1:0,x2:0,y2:1,stop:0 #484848,stop:1 #383838);\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:hover{\\n\"\n\"background:#5B5B5B;\\n\"\n\"}\\n\"\n\"QHeaderView::section{\\n\"\n\"text-align:center;\\n\"\n\"background:#5E5E5E;\\n\"\n\"padding:3px;\\n\"\n\"margin:0px;\\n\"\n\"color:#DCDCDC;\\n\"\n\"border:1px solid #242424;\\n\"\n\"border-left-width:0;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar:vertical{\\n\"\n\"background:#484848;\\n\"\n\"padding:0px;\\n\"\n\"border-radius:6px;\\n\"\n\"max-width:12px;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::handle:vertical{\\n\"\n\"background:#CCCCCC;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::handle:hover:vertical,QScrollBar::handle:pressed:vertical{\\n\"\n\"background:#A7A7A7;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-page:vertical{\\n\"\n\"background:444444;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::add-page:vertical{\\n\"\n\"background:5B5B5B;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::add-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\")\n self.tableWidget.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.tableWidget.setMidLineWidth(-1)\n self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setAutoScroll(False)\n self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)\n self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.tableWidget.setTextElideMode(QtCore.Qt.ElideNone)\n self.tableWidget.setObjectName(\"tableWidget\")\n self.tableWidget.setColumnCount(15)\n self.tableWidget.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n self.tableWidget.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n 
self.tableWidget.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(4, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(5, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(6, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(7, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(8, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(9, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(10, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(11, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(12, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(13, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(14, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 0, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 1, item)\n item = QtWidgets.QTableWidgetItem()\n 
item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 2, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 3, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 4, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 5, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 6, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 7, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 8, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 9, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 10, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 11, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 12, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 13, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 14, item)\n self.tableWidget.horizontalHeader().setVisible(True)\n self.tableWidget.horizontalHeader().setCascadingSectionResizes(True)\n self.tableWidget.horizontalHeader().setDefaultSectionSize(41)\n self.tableWidget.horizontalHeader().setStretchLastSection(True)\n self.tableWidget.verticalHeader().setVisible(False)\n self.tableWidget.verticalHeader().setCascadingSectionResizes(False)\n self.tableWidget.verticalHeader().setDefaultSectionSize(40)\n self.tableWidget.verticalHeader().setHighlightSections(True)\n self.tableWidget.verticalHeader().setMinimumSectionSize(40)\n self.tableWidget.verticalHeader().setSortIndicatorShown(False)\n self.RPlayedCard = QtWidgets.QLabel(Form)\n self.RPlayedCard.setGeometry(QtCore.QRect(490, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.RPlayedCard.setFont(font)\n self.RPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.RPlayedCard.setObjectName(\"RPlayedCard\")\n self.PredictedCard = QtWidgets.QLabel(Form)\n self.PredictedCard.setGeometry(QtCore.QRect(320, 190, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PredictedCard.setFont(font)\n self.PredictedCard.setStyleSheet(\"\")\n self.PredictedCard.setFrameShape(QtWidgets.QFrame.Panel)\n self.PredictedCard.setLineWidth(1)\n self.PredictedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.PredictedCard.setObjectName(\"PredictedCard\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Hi\"))\n self.WinRate.setText(_translate(\"Form\", \"评分\"))\n self.UserHandCards.setText(_translate(\"Form\", \"手牌\"))\n self.ThreeLandlordCards.setText(_translate(\"Form\", \"地主牌\"))\n 
self.BidWinrate.setText(_translate(\"Form\", \"叫牌胜率:\"))\n self.PreWinrate.setText(_translate(\"Form\", \"局前胜率:\"))\n self.label.setText(_translate(\"Form\", \"游戏状态\"))\n self.LPlayedCard.setText(_translate(\"Form\", \"上家出牌区域\"))\n self.SingleButton.setText(_translate(\"Form\", \"单局\"))\n self.LoopButton.setText(_translate(\"Form\", \" 连续\"))\n self.StopButton.setText(_translate(\"Form\", \"停止\"))\n item = self.tableWidget.horizontalHeaderItem(0)\n item.setText(_translate(\"Form\", \"大\"))\n item = self.tableWidget.horizontalHeaderItem(1)\n item.setText(_translate(\"Form\", \"小\"))\n item = self.tableWidget.horizontalHeaderItem(2)\n item.setText(_translate(\"Form\", \"2\"))\n item = self.tableWidget.horizontalHeaderItem(3)\n item.setText(_translate(\"Form\", \"A\"))\n item = self.tableWidget.horizontalHeaderItem(4)\n item.setText(_translate(\"Form\", \"K\"))\n item = self.tableWidget.horizontalHeaderItem(5)\n item.setText(_translate(\"Form\", \"Q\"))\n item = self.tableWidget.horizontalHeaderItem(6)\n item.setText(_translate(\"Form\", \"J\"))\n item = self.tableWidget.horizontalHeaderItem(7)\n item.setText(_translate(\"Form\", \"10\"))\n item = self.tableWidget.horizontalHeaderItem(8)\n item.setText(_translate(\"Form\", \"9\"))\n item = self.tableWidget.horizontalHeaderItem(9)\n item.setText(_translate(\"Form\", \"8\"))\n item = self.tableWidget.horizontalHeaderItem(10)\n item.setText(_translate(\"Form\", \"7\"))\n item = self.tableWidget.horizontalHeaderItem(11)\n item.setText(_translate(\"Form\", \"6\"))\n item = self.tableWidget.horizontalHeaderItem(12)\n item.setText(_translate(\"Form\", \"5\"))\n item = self.tableWidget.horizontalHeaderItem(13)\n item.setText(_translate(\"Form\", \"4\"))\n item = self.tableWidget.horizontalHeaderItem(14)\n item.setText(_translate(\"Form\", \"3\"))\n __sortingEnabled = self.tableWidget.isSortingEnabled()\n self.tableWidget.setSortingEnabled(False)\n item = self.tableWidget.item(0, 0)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 1)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 2)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 3)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 4)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 5)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 6)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 7)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 8)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 9)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 10)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 11)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 12)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 13)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 14)\n item.setText(_translate(\"Form\", \"0\"))\n self.tableWidget.setSortingEnabled(__sortingEnabled)\n self.RPlayedCard.setText(_translate(\"Form\", \"下家出牌区域\"))\n self.PredictedCard.setText(_translate(\"Form\", \"AI出牌区域\"))" }, { "identifier": "GameEnv", "path": "douzero/env/game.py", "snippet": "class GameEnv(object):\n\n def __init__(self, players):\n\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n 
self.acting_player_position = None\n self.player_utility_dict = None\n\n self.players = players\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.num_wins = {'landlord': 0,\n 'farmer': 0}\n\n self.num_scores = {'landlord': 0,\n 'farmer': 0}\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 1,\n 'landlord_up': 1,\n 'landlord_down': 1}\n self.step_count = 0\n\n\n def card_play_init(self, card_play_data):\n self.info_sets['landlord'].player_hand_cards = \\\n card_play_data['landlord']\n self.info_sets['landlord_up'].player_hand_cards = \\\n card_play_data['landlord_up']\n self.info_sets['landlord_down'].player_hand_cards = \\\n card_play_data['landlord_down']\n self.three_landlord_cards = card_play_data['three_landlord_cards']\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n\n\n def game_done(self):\n if len(self.info_sets['landlord'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_up'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_down'].player_hand_cards) == 0:\n # if one of the three players discards his hand,\n # then game is over.\n self.compute_player_utility()\n self.update_num_wins_scores()\n\n self.game_over = True\n\n def compute_player_utility(self):\n\n if len(self.info_sets['landlord'].player_hand_cards) == 0:\n self.player_utility_dict = {'landlord': 2,\n 'farmer': -1}\n else:\n self.player_utility_dict = {'landlord': -2,\n 'farmer': 1}\n\n def update_num_wins_scores(self):\n for pos, utility in self.player_utility_dict.items():\n base_score = 2 if pos == 'landlord' else 1\n if utility > 0:\n self.num_wins[pos] += 1\n self.winner = pos\n self.num_scores[pos] += base_score * (2 ** self.bomb_num)\n else:\n self.num_scores[pos] -= base_score * (2 ** self.bomb_num)\n\n def get_winner(self):\n return self.winner\n\n def get_bomb_num(self):\n return self.bomb_num\n\n def step(self, position, action=[]):\n win_rate = 0\n if self.acting_player_position == position:\n action, actions_confidence = self.players[1].act(self.game_infoset)\n # 计算胜率\n win_rate = actions_confidence\n # win_rate = max(actions_confidence, -1)\n # win_rate = min(win_rate, 1)\n # win_rate = str(round(float((win_rate + 1) / 2), 4))\n\n if len(action) > 0:\n self.last_pid = self.acting_player_position\n\n if action in bombs:\n self.bomb_num += 1\n\n self.last_move_dict[\n self.acting_player_position] = action.copy()\n\n self.card_play_action_seq.append((position, action))\n self.update_acting_player_hand_cards(action)\n\n self.played_cards[self.acting_player_position] += action\n\n if self.acting_player_position == 'landlord' and \\\n len(action) > 0 and \\\n len(self.three_landlord_cards) > 0:\n for card in action:\n if len(self.three_landlord_cards) > 0:\n if card in self.three_landlord_cards:\n self.three_landlord_cards.remove(card)\n else:\n break\n self.game_done()\n if not self.game_over:\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n # 返回动作和胜率,只有玩家角色会接受返回值\n action_message = {\"action\": str(''.join([EnvCard2RealCard[c] for c in action])),\n \"win_rate\": 
str(round(float(win_rate), 4))}\n return action_message\n\n def get_last_move(self):\n last_move = []\n if len(self.card_play_action_seq) != 0:\n if len(self.card_play_action_seq[-1][1]) == 0:\n last_move = self.card_play_action_seq[-2][1]\n else:\n last_move = self.card_play_action_seq[-1][1]\n\n return last_move\n\n def get_last_two_moves(self):\n last_two_moves = [[], []]\n for card in self.card_play_action_seq[-2:]:\n last_two_moves.insert(0, card[1])\n last_two_moves = last_two_moves[:2]\n return last_two_moves\n\n def get_acting_player_position(self):\n if self.acting_player_position is None:\n self.acting_player_position = 'landlord'\n\n else:\n if self.acting_player_position == 'landlord':\n self.acting_player_position = 'landlord_down'\n\n elif self.acting_player_position == 'landlord_down':\n self.acting_player_position = 'landlord_up'\n\n else:\n self.acting_player_position = 'landlord'\n\n return self.acting_player_position\n\n def update_acting_player_hand_cards(self, action):\n if action != []:\n # 更新玩家手牌,删除对应的牌\n if self.acting_player_position == self.players[0]:\n for card in action:\n self.info_sets[self.acting_player_position].player_hand_cards.remove(card)\n # 更新另外两个玩家手牌,删除相同数量的牌\n else:\n del self.info_sets[self.acting_player_position].player_hand_cards[0:len(action)]\n self.info_sets[self.acting_player_position].player_hand_cards.sort()\n\n def get_legal_card_play_actions(self):\n mg = MovesGener(\n self.info_sets[self.acting_player_position].player_hand_cards)\n\n action_sequence = self.card_play_action_seq\n\n rival_move = []\n if len(action_sequence) != 0:\n if len(action_sequence[-1][1]) == 0:\n rival_move = action_sequence[-2][1]\n else:\n rival_move = action_sequence[-1][1]\n\n rival_type = md.get_move_type(rival_move)\n rival_move_type = rival_type['type']\n rival_move_len = rival_type.get('len', 1)\n moves = list()\n\n if rival_move_type == md.TYPE_0_PASS:\n moves = mg.gen_moves()\n\n elif rival_move_type == md.TYPE_1_SINGLE:\n all_moves = mg.gen_type_1_single()\n moves = ms.filter_type_1_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_2_PAIR:\n all_moves = mg.gen_type_2_pair()\n moves = ms.filter_type_2_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_3_TRIPLE:\n all_moves = mg.gen_type_3_triple()\n moves = ms.filter_type_3_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_4_BOMB:\n all_moves = mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n moves = ms.filter_type_4_bomb(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_5_KING_BOMB:\n moves = []\n\n elif rival_move_type == md.TYPE_6_3_1:\n all_moves = mg.gen_type_6_3_1()\n moves = ms.filter_type_6_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_7_3_2:\n all_moves = mg.gen_type_7_3_2()\n moves = ms.filter_type_7_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_8_SERIAL_SINGLE:\n all_moves = mg.gen_type_8_serial_single(repeat_num=rival_move_len)\n moves = ms.filter_type_8_serial_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_9_SERIAL_PAIR:\n all_moves = mg.gen_type_9_serial_pair(repeat_num=rival_move_len)\n moves = ms.filter_type_9_serial_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_10_SERIAL_TRIPLE:\n all_moves = mg.gen_type_10_serial_triple(repeat_num=rival_move_len)\n moves = ms.filter_type_10_serial_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_11_SERIAL_3_1:\n all_moves = mg.gen_type_11_serial_3_1(repeat_num=rival_move_len)\n moves = 
ms.filter_type_11_serial_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_12_SERIAL_3_2:\n all_moves = mg.gen_type_12_serial_3_2(repeat_num=rival_move_len)\n moves = ms.filter_type_12_serial_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_13_4_2:\n all_moves = mg.gen_type_13_4_2()\n moves = ms.filter_type_13_4_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_14_4_22:\n all_moves = mg.gen_type_14_4_22()\n moves = ms.filter_type_14_4_22(all_moves, rival_move)\n\n if rival_move_type not in [md.TYPE_0_PASS,\n md.TYPE_4_BOMB, md.TYPE_5_KING_BOMB]:\n moves = moves + mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n\n if len(rival_move) != 0: # rival_move is not 'pass'\n moves = moves + [[]]\n\n for m in moves:\n m.sort()\n\n return moves\n\n def reset(self):\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n self.acting_player_position = None\n self.player_utility_dict = None\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 0,\n 'landlord_up': 0,\n 'landlord_down': 0}\n self.step_count = 0\n\n def get_infoset(self):\n self.info_sets[\n self.acting_player_position].last_pid = self.last_pid\n\n self.info_sets[\n self.acting_player_position].legal_actions = \\\n self.get_legal_card_play_actions()\n\n self.info_sets[\n self.acting_player_position].bomb_num = self.bomb_num\n\n self.info_sets[\n self.acting_player_position].last_move = self.get_last_move()\n\n self.info_sets[\n self.acting_player_position].last_two_moves = self.get_last_two_moves()\n\n self.info_sets[\n self.acting_player_position].last_move_dict = self.last_move_dict\n\n self.info_sets[self.acting_player_position].num_cards_left_dict = \\\n {pos: len(self.info_sets[pos].player_hand_cards)\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n self.info_sets[self.acting_player_position].other_hand_cards = []\n\n '''\n 调整计算其他人手牌的方法,整副牌减去玩家手牌与出过的牌\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n if pos != self.acting_player_position:\n self.info_sets[\n self.acting_player_position].other_hand_cards += \\\n self.info_sets[pos].player_hand_cards\n '''\n # 把出过的牌中三个子列表合成一个列表\n played_cards_tmp = []\n for i in list(self.played_cards.values()):\n played_cards_tmp.extend(i)\n # 出过的牌和玩家手上的牌\n played_and_hand_cards = played_cards_tmp + self.info_sets[self.acting_player_position].player_hand_cards\n # 整副牌减去出过的牌和玩家手上的牌,就是其他人的手牌\n for i in set(AllEnvCard):\n self.info_sets[\n self.acting_player_position].other_hand_cards.extend([i] * (AllEnvCard.count(i) - played_and_hand_cards.count(i)))\n\n self.info_sets[self.acting_player_position].played_cards = \\\n self.played_cards\n self.info_sets[self.acting_player_position].three_landlord_cards = \\\n self.three_landlord_cards\n self.info_sets[self.acting_player_position].card_play_action_seq = \\\n self.card_play_action_seq\n\n self.info_sets[\n self.acting_player_position].all_handcards = \\\n {pos: self.info_sets[pos].player_hand_cards\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n # Custom bid 
info\n self.info_sets[self.acting_player_position].bid_info = bid_infos[self.acting_player_position]\n\n return deepcopy(self.info_sets[self.acting_player_position])" }, { "identifier": "DeepAgent", "path": "douzero/evaluation/deep_agent.py", "snippet": "class DeepAgent:\n\n def __init__(self, position, model_path):\n self.model_type = \"old\"\n if \"general\" in model_path:\n self.model_type = \"general\"\n elif \"resnet\" in model_path:\n self.model_type = \"resnet\"\n self.model = _load_model(position, model_path, self.model_type)\n\n def act(self, infoset):\n obs = get_obs(infoset, model_type=self.model_type)\n z_batch = torch.from_numpy(obs['z_batch']).float()\n x_batch = torch.from_numpy(obs['x_batch']).float()\n if torch.cuda.is_available():\n z_batch, x_batch = z_batch.cuda(), x_batch.cuda()\n y_pred = self.model.forward(z_batch, x_batch, return_value=True)['values']\n y_pred = y_pred.detach().cpu().numpy()\n\n best_action_index = np.argmax(y_pred, axis=0)[0]\n best_action = infoset.legal_actions[best_action_index]\n best_action_confidence = y_pred[best_action_index]\n return best_action, best_action_confidence" } ]
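The DeepAgent.act snippet in the context above scores every legal action with a value network and plays the argmax. The short stand-in below shows that selection pattern with score_fn replacing the batched forward pass; the helper name and the toy scoring rule are assumptions, not part of the repo.

import numpy as np

def pick_best_action(legal_actions, score_fn):
    # Score each legal action and return (best_action, best_score),
    # the same argmax-over-values pattern used in DeepAgent.act above,
    # with score_fn standing in for the batched network forward pass.
    scores = np.array([score_fn(a) for a in legal_actions], dtype=float)
    best = int(np.argmax(scores))
    return legal_actions[best], float(scores[best])

# Toy demo: prefer longer moves, purely for illustration.
actions = [[3], [3, 3], [4, 4, 4]]
best_action, confidence = pick_best_action(actions, score_fn=len)
print(best_action, confidence)  # [4, 4, 4] 3.0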
import GameHelper as gh
import os
import sys
import time
import threading
import pyautogui
import win32gui
import multiprocessing as mp
import DetermineColor as DC
import cv2
import numpy as np
import traceback
import BidModel
import LandlordModel
import FarmerModel
from GameHelper import GameHelper
from PIL import Image
from skimage.metrics import structural_similarity as ssim
from collections import defaultdict
from douzero.env.move_detector import get_move_type
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtCore import QTime, QEventLoop, Qt
from MainWindow import Ui_Form
from douzero.env.game import GameEnv
from douzero.evaluation.deep_agent import DeepAgent
15,236
def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # 其他玩家出牌 self.other_played_cards_real = "" self.other_played_cards_env = [] # 其他玩家手牌(整副牌减去玩家手牌,后续再减掉历史出牌) self.other_hand_cards = [] # 三张底牌 self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家 self.user_position_code = None self.user_position = "" # 开局时三个玩家的手牌 self.card_play_data_list = {} # 识别玩家手牌 self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # 识别三张底牌 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # 识别玩家的角色 self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str = str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if 
(self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # 生成手牌结束,校验手牌数量 if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # 出牌顺序:0-玩家出牌, 1-玩家下家出牌, 2-玩家上家出牌 self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # 创建一个代表玩家的AI ai_players = [0, 0] ai_players[0] = self.user_position ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position])
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper() class MyPyQT_Form(QtWidgets.QWidget, Ui_Form): def __init__(self): super(MyPyQT_Form, self).__init__() self.other_hands_cards_str = None self.stop_sign = None self.loop_sign = None self.env = None self.three_landlord_cards_env = None self.three_landlord_cards_real = None self.user_hand_cards_env = None self.user_hand_cards_real = None self.play_order = None self.card_play_data_list = None self.other_hand_cards = None self.other_played_cards_env = None self.other_played_cards_real = None self.user_position = None self.user_position_code = None self.setupUi(self) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | # 使能最小化按钮 QtCore.Qt.WindowStaysOnTopHint | # 窗体总在最前端 QtCore.Qt.WindowCloseButtonHint) self.setWindowIcon(QIcon(':/pics/favicon.ico')) self.setWindowTitle("DouZero欢乐斗地主v2.0") self.setFixedSize(self.width(), self.height()) # 固定窗体大小 self.move(50, 50) # self.setWindowIcon(QIcon('pics/favicon.ico')) window_pale = QtGui.QPalette() # window_pale.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap("pics/bg.png"))) self.setPalette(window_pale) self.SingleButton.clicked.connect(self.game_single) self.LoopButton.clicked.connect(self.game_loop) self.StopButton.clicked.connect(self.stop) # self.Players = [self.RPlayer, self.Player, self.LPlayer] self.Players = [self.RPlayedCard, self.PredictedCard, self.LPlayedCard] self.counter = QTime() # 参数 self.MyConfidence = 0.8 # 我的牌的置信度 self.OtherConfidence = 0.8 # 别人的牌的置信度 self.WhiteConfidence = 0.85 # 检测白块的置信度 self.LandlordFlagConfidence = 0.8 # 检测地主标志的置信度 self.ThreeLandlordCardsConfidence = 0.8 # 检测地主底牌的置信度 self.PassConfidence = 0.7 self.PassConfidence = 0.8 self.WaitTime = 1 # 等待状态稳定延时 self.MyFilter = 40 # 我的牌检测结果过滤参数 self.OtherFilter = 25 # 别人的牌检测结果过滤参数 self.SleepTime = 0.1 # 循环中睡眠时间 self.RunGame = False self.AutoPlay = False self.BidThreshold1 = 65 # 叫地主阈值 self.BidThreshold2 = 72 # 抢地主阈值 self.JiabeiThreshold = ( (85, 72), # 叫地主 超级加倍 加倍 阈值 (85, 75) # 叫地主 超级加倍 加倍 阈值 (在地主是抢来的情况下) ) self.MingpaiThreshold = 92 # 坐标 self.MyHandCardsPos = (180, 560, 1050, 90) # 我的截图区域 self.LPlayedCardsPos = (320, 280, 500, 120) # 左边出牌截图区域 self.RPlayedCardsPos = (600, 280, 500, 120) # 右边出牌截图区域 self.LandlordCardsPos = (600, 33, 220, 103) # 地主底牌截图区域 self.LPassPos = (360, 360, 120, 80) # 左边不出截图区域 self.RPassPos = (940, 360, 120, 80) # 右边不出截图区域 self.PassBtnPos = (200, 450, 1000, 120) # 要不起截图区域 self.GeneralBtnPos = (200, 450, 1000, 120) # 叫地主、抢地主、加倍按钮截图区域 self.LandlordFlagPos = [(1247, 245, 48, 52), (12, 661, 51, 53), (123, 243, 52, 54)] # 地主标志截图区域(右-我-左) self.card_play_model_path_dict = { 'landlord': "baselines/resnet/resnet_landlord.ckpt", 'landlord_up': "baselines/resnet/resnet_landlord_up.ckpt", 'landlord_down': "baselines/resnet/resnet_landlord_down.ckpt" } def game_single(self): self.loop_sign = 0 self.stop_sign = 0 self.detect_start_btn() self.before_start() self.init_cards() def game_loop(self): 
self.loop_sign = 1 self.stop_sign = 0 while True: if self.stop_sign == 1: break self.detect_start_btn() self.before_start() self.init_cards() self.sleep(5000) def stop(self): self.stop_sign = 1 print("按下停止键") try: self.RunGame = False self.loop_sign = 0 self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") except AttributeError as e: traceback.print_exc() def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # 其他玩家出牌 self.other_played_cards_real = "" self.other_played_cards_env = [] # 其他玩家手牌(整副牌减去玩家手牌,后续再减掉历史出牌) self.other_hand_cards = [] # 三张底牌 self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家 self.user_position_code = None self.user_position = "" # 开局时三个玩家的手牌 self.card_play_data_list = {} # 识别玩家手牌 self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # 识别三张底牌 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # 识别玩家的角色 self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str 
= str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # 生成手牌结束,校验手牌数量 if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # 出牌顺序:0-玩家出牌, 1-玩家下家出牌, 2-玩家上家出牌 self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # 创建一个代表玩家的AI ai_players = [0, 0] ai_players[0] = self.user_position ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position])
self.env = GameEnv(ai_players)
3
2023-12-01 04:04:30+00:00
24k
super1207/satoricq
satori.py
[ { "identifier": "AdapterKook", "path": "kook_adapter.py", "snippet": "class AdapterKook:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._access_token = config[\"access_token\"]\n self._http_url = \"https://www.kookapp.cn/api/v3\"\n self._is_stop = False\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n self._sn = 0\n self._self_id = None\n\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n \n\n async def _ws_recv(self,websocket):\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n return reply\n except asyncio.TimeoutError:\n return None\n\n async def _ws_connect(self):\n self._login_status = SatoriLogin.LoginStatus.CONNECT\n ws_url = (await self._api_call(\"/gateway/index?compress=0\"))[\"url\"]\n async with connect(ws_url) as websocket:\n tm = time.time()\n while not self._is_stop:\n reply = await self._ws_recv(websocket)\n if not reply:\n now_time = time.time()\n if now_time - tm > 30:\n tm = now_time\n await websocket.send(json.dumps({\"s\": 2,\"sn\": self._sn}))\n continue\n js = json.loads(reply)\n s = js[\"s\"]\n if s == 5:raise Exception(\"recv reset ws\")\n elif s == 3:pass # heartbeat\n elif s == 1:\n self._login_status = SatoriLogin.LoginStatus.ONLINE\n print(\"kook:ws连接成功\")\n elif s == 0:\n self._sn = js[\"sn\"]\n asyncio.create_task(self._event_deal(js[\"d\"]))\n\n async def _ws_server(self) -> None:\n while not self._is_stop:\n try:\n await self._ws_connect()\n except:\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n print(traceback.format_exc())\n await asyncio.sleep(3)\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n\n async def init_after(self) -> None:\n '''适配器创建之后会调用一次,应该在这里进行ws连接等操作,如果不需要,可以不写'''\n asyncio.create_task(self._ws_server())\n\n def _kook_msg_to_satori(self,msg_type:int,message:str)->str:\n ret = \"\"\n if msg_type == 2: #图片\n ret += \"<img src={}/>\".format(json.dumps(message))\n else:\n def kook_msg_f(msg):\n ret = \"\"\n is_f = False\n for ch in msg:\n if is_f:\n is_f = False\n ret += ch\n elif ch == \"\\\\\":\n is_f = True\n else:\n ret += ch\n return ret\n \n index = 0\n msg_list = message.split(\"(met)\")\n for it in msg_list:\n if index % 2 == 0:\n ret += satori_to_plain(kook_msg_f(it))\n else:\n if it == \"all\":\n ret += \"<at type=\\\"all\\\"/>\"\n else:\n ret += \"<at id=\\\"{}\\\"/>\".format(it)\n index += 1\n return ret\n\n\n async def _deal_group_message_event(self,data,user_id:str):\n group_id = data[\"target_id\"]\n kook_msg = data[\"content\"]\n extra = data[\"extra\"]\n author = extra[\"author\"]\n msg_type = data[\"type\"]\n\n if msg_type == 10:#卡牌\n return\n satori_msg = self._kook_msg_to_satori(msg_type,kook_msg)\n\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=data[\"msg_timestamp\"],\n platform=\"kook\",\n channel=SatoriChannel(\n id=\"GROUP_\"+group_id,\n type=SatoriChannel.ChannelType.TEXT,\n 
name=extra[\"channel_name\"]\n ),\n message=SatoriMessage(\n id=data[\"msg_id\"],\n content=satori_msg,\n created_at=data[\"msg_timestamp\"]\n ),\n user=SatoriUser(\n id=author[\"id\"],\n name=author[\"username\"],\n avatar=author[\"avatar\"],\n is_bot=author[\"bot\"]\n ),\n member=SatoriGuildMember(\n nick=author[\"nickname\"],\n avatar=author[\"avatar\"]\n ),\n guild=SatoriGuild(\n id=extra[\"guild_id\"]\n ),\n role=SatoriGuildRole(\n id=json.dumps(sorted(author[\"roles\"]))\n )\n )\n self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _deal_private_message_event(self,data,user_id:str):\n\n kook_msg = data[\"content\"]\n extra = data[\"extra\"]\n author = extra[\"author\"]\n msg_type = data[\"type\"]\n\n if msg_type == 10:#卡牌\n return\n satori_msg = self._kook_msg_to_satori(msg_type,kook_msg)\n\n satori_evt = SatoriPrivateMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=data[\"msg_timestamp\"],\n channel=SatoriChannel(\n id=user_id,\n type=SatoriChannel.ChannelType.TEXT,\n name=author[\"username\"]\n ),\n message=SatoriMessage(\n id=data[\"msg_id\"],\n content=satori_msg,\n created_at=data[\"msg_timestamp\"]\n ),\n user=SatoriUser(\n id=user_id,\n name=author[\"username\"],\n avatar=author[\"avatar\"],\n is_bot=author[\"bot\"]\n ),\n platform=\"kook\"\n ).to_dict()\n self._id += 1\n self._queue.put_nowait(satori_evt)\n\n async def _deal_group_increase_event(self,data):\n extra = data[\"extra\"]\n satori_evt = {\n \"id\":self._id,\n \"type\":\"guild-member-added\",\n \"platform\":\"kook\",\n \"self_id\":self._self_id,\n \"timestamp\":data[\"msg_timestamp\"],\n \"guild\":SatoriGuild(id=data[\"target_id\"]).to_dict(),\n \"member\":SatoriGuildMember(joined_at=extra[\"body\"][\"joined_at\"]).to_dict(),\n \"user\":SatoriUser(id=extra[\"body\"][\"user_id\"]).to_dict()\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n\n\n\n async def _deal_group_evt(self,data):\n user_id:str = data[\"author_id\"]\n if user_id == \"1\": # system message\n tp = data[\"type\"]\n if tp != 255:\n return\n sub_type = data[\"extra\"][\"type\"]\n if sub_type == \"joined_guild\":\n await self._deal_group_increase_event(data)\n else:\n if self._self_id:\n if user_id != self._self_id:\n await self._deal_group_message_event(data,user_id)\n\n\n async def _deal_person_evt(self,data):\n user_id:str = data[\"author_id\"]\n if user_id != 1: # 不是系统消息\n if self._self_id:\n if user_id != self._self_id:\n await self._deal_private_message_event(data,user_id)\n\n\n async def _event_deal(self,data:dict):\n try:\n tp = data[\"channel_type\"]\n if tp == \"GROUP\":\n await self._deal_group_evt(data)\n else:\n await self._deal_person_evt(data)\n except:\n print(traceback.format_exc())\n \n async def _api_call(self,path,data = None) -> dict:\n url:str = self._http_url + path\n headers = {\"Authorization\":\"Bot {}\".format(self._access_token)}\n if data == None:\n async with httpx.AsyncClient() as client:\n return (await client.get(url,headers=headers)).json()[\"data\"]\n else:\n async with httpx.AsyncClient() as client:\n return (await client.post(url,headers=headers,data=data)).json()[\"data\"]\n\n def _make_kook_text(self,text):\n ret = \"\"\n for ch in text:\n if ch in [\"\\\\\",\"*\",\"~\",\"[\",\"(\",\")\",\"]\",\"-\",\">\",\"`\"]:\n ret += \"\\\\\"\n ret += ch\n return ret\n \n async def _satori_to_kook(self,satori_obj) -> [dict]:\n to_send_data = []\n last_type = 1\n for node in satori_obj:\n if isinstance(node,str):\n text = self._make_kook_text(node)\n if last_type == 1 and 
len(to_send_data) != 0:\n l = len(to_send_data)\n to_send_data[l - 1][\"content\"] += text\n else:\n to_send_data.append({\n \"type\":1,\n \"content\":text\n })\n last_type = 1\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n text = \"(met)all(met)\"\n elif id != None:\n text = \"(met){}(met)\".format(self._make_kook_text(id))\n if last_type == 1 and len(to_send_data) != 0:\n l = len(to_send_data)\n to_send_data[l - 1][\"content\"] += text\n else:\n to_send_data.append({\n \"type\":1,\n \"content\":text\n })\n last_type = 1\n elif node[\"type\"] == \"img\":\n img_url:str = node[\"attrs\"][\"src\"]\n kook_img_url = \"\"\n if img_url.startswith(\"https://img.kookapp.cn\"):\n kook_img_url = img_url\n else:\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_content = base64.b64decode(img_url[base64_start + 7:])\n else:\n async with httpx.AsyncClient() as client:\n img_content = (await client.get(img_url)).content\n files = {\n 'file':('test',img_content)\n }\n headers = {\"Authorization\":\"Bot {}\".format(self._access_token)}\n async with httpx.AsyncClient() as client:\n ret = (await client.post(self._http_url + \"/asset/create\",files=files,headers=headers)).json()\n kook_img_url = ret[\"data\"][\"url\"]\n to_send_data.append({\n \"type\":2,\n \"content\":kook_img_url\n })\n last_type = 2\n return to_send_data\n \n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n satori_obj = parse_satori_html(content)\n to_sends = await self._satori_to_kook(satori_obj)\n if channel_id.startswith(\"GROUP_\"):\n channel_id = int(channel_id[6:])\n to_ret = []\n for it in to_sends:\n ret = await self._api_call(\"/message/create\",{\"content\":it[\"content\"],\"type\":it[\"type\"],\"target_id\":channel_id})\n to_ret.append(SatoriMessage(id=ret[\"msg_id\"],content=\"\").to_dict())\n return to_ret\n else:\n to_ret = []\n for it in to_sends:\n ret = await self._api_call(\"/direct-message/create\",{\"content\":it[\"content\"],\"type\":it[\"type\"],\"target_id\":channel_id})\n to_ret.append(SatoriMessage(id=ret[\"msg_id\"],content=\"\").to_dict())\n return to_ret\n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n obret = (await self._api_call(\"/user/me\"))\n satori_ret = SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=obret[\"id\"],\n name=obret[\"username\"],\n avatar=get_json_or(obret,\"avatar\",None),\n is_bot=True\n ),\n self_id=obret[\"id\"],\n platform=\"kook\"\n ).to_dict()\n self._self_id = obret[\"id\"]\n if platform == None and self_id == None:\n return [satori_ret]\n else:\n return satori_ret\n \n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n url = \"/user/view?user_id={}&guild_id={}\".format(user_id,guild_id)\n obret = (await self._api_call(url))\n satori_ret = SatoriGuildMember(\n user=SatoriUser(\n id=obret[\"id\"],\n name=get_json_or(obret,\"username\",None),\n avatar=get_json_or(obret,\"avatar\",None),\n is_bot=get_json_or(obret,\"bot\",None)\n ),\n nick=get_json_or(obret,\"nickname\",None),\n avatar=get_json_or(obret,\"avatar\",None),\n joined_at=get_json_or(obret,\"join_time\",None)\n ).to_dict()\n return satori_ret\n \n async def get_user(self,platform:Optional[str],self_id:Optional[str],user_id:str) -> 
[dict]:\n '''获取用户信息'''\n url = \"/user/view?user_id={}\".format(user_id)\n obret = (await self._api_call(url))\n satori_ret = SatoriUser(\n id=obret[\"id\"],\n name=obret[\"username\"],\n avatar=obret[\"avatar\"],\n is_bot=obret[\"bot\"],\n ).to_dict()\n return satori_ret\n \n async def get_channel_list(self,platform:Optional[str],self_id:Optional[str],guild_id:str) -> [dict]:\n '''获取频道列表'''\n url = \"/channel/list?guild_id={}\".format(guild_id)\n obret = (await self._api_call(url))\n ret_list = []\n items = get_json_or(obret,\"items\",None)\n for it in items:\n channel_type = it[\"type\"]\n channel_id = \"GROUP_\" + it[\"id\"]\n channel_name = it[\"name\"]\n channel_parent = it[\"parent_id\"]\n if channel_type == 1:\n ret_list.append(SatoriChannel(\n id=channel_id,\n name=channel_name,\n type=SatoriChannel.ChannelType.TEXT,\n parent_id=channel_parent\n ).to_dict())\n page_total = get_json_or(obret,\"data\",1)\n if page_total > 1:\n for i in range(2,page_total + 1):\n url = \"/channel/list?guild_id={}&page={}\".format(guild_id,i)\n obret = (await self._api_call(url))\n items = get_json_or(obret,\"items\",None)\n for it in items:\n channel_type = it[\"type\"]\n channel_id = \"GROUP_\" + it[\"id\"]\n channel_name = it[\"name\"]\n channel_parent = it[\"parent_id\"]\n if channel_type == 1:\n ret_list.append(SatoriChannel(\n id=channel_id,\n name=channel_name,\n type=SatoriChannel.ChannelType.TEXT,\n parent=channel_parent\n ).to_dict())\n return {\"data\":ret_list}" }, { "identifier": "AdapterMihoyo", "path": "mihoyo_adapter.py", "snippet": "class AdapterMihoyo:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._http_url = \"https://bbs-api.miyoushe.com\"\n self._is_stop = False\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n self._sn = 1\n self._self_id = config[\"bot_id\"]\n self._secret = config[\"secret\"]\n self._villa_id = config[\"villa_id\"]\n\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n\n async def _send_ws_pack(self,ws,ws_dat,biztype):\n magic = 0xBABEFACE.to_bytes(length=4, byteorder='little', signed=False)\n if biztype == 7:\n pb_pack = bytes(PLogin(\n uid=int(ws_dat[\"uid\"]),\n token=self._villa_id + \".\" + self._secret + \".\" + self._self_id,\n platform=ws_dat[\"platform\"],\n app_id=ws_dat[\"app_id\"],\n device_id=ws_dat[\"device_id\"]\n ))\n elif biztype == 6:\n pb_pack = bytes(PHeartBeat(\n client_timestamp=str(int(round(time.time() * 1000)))\n ))\n else:\n raise Exception(\"unkonw biztype:{}\".format(biztype))\n \n wid = self._sn\n self._sn += 1\n\n flag = 1\n appid = 104\n headerlen = 24\n datalen = headerlen + len(pb_pack)\n\n to_send = magic\n to_send += datalen.to_bytes(length=4, byteorder='little', signed=False)\n to_send += headerlen.to_bytes(length=4, byteorder='little', signed=False)\n to_send += wid.to_bytes(length=8, byteorder='little', signed=False)\n to_send += flag.to_bytes(length=4, byteorder='little', 
signed=False)\n to_send += biztype.to_bytes(length=4, byteorder='little', signed=False)\n to_send += appid.to_bytes(length=4, byteorder='little', signed=True)\n to_send += pb_pack\n\n await ws.send(to_send)\n \n async def _ws_recv(self,websocket):\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n return reply\n except asyncio.TimeoutError:\n return None\n\n async def _ws_connect(self):\n self._login_status = SatoriLogin.LoginStatus.CONNECT\n ws_dat = (await self._api_call(\"/vila/api/bot/platform/getWebsocketInfo\"))\n # print(ws_dat)\n ws_url = ws_dat[\"websocket_url\"]\n async with connect(ws_url) as websocket:\n await self._send_ws_pack(websocket,ws_dat,biztype=7)\n tm = time.time()\n while not self._is_stop:\n reply = await self._ws_recv(websocket)\n if not reply:\n now_time = time.time()\n if now_time - tm > 30:\n tm = now_time\n await self._send_ws_pack(websocket,ws_dat,biztype=6)\n continue\n biztype = int.from_bytes(reply[24:28],byteorder='little',signed=False)\n if biztype == 7: # 登录返回\n login_reply = PLoginReply().parse(reply[32:])\n if login_reply.code == 0:\n print(\"mihoyo:ws连接成功\")\n self._login_status = SatoriLogin.LoginStatus.ONLINE\n continue\n else:\n print(\"mihoyo:ws连接失败\",login_reply.to_json())\n break\n elif biztype == 53:\n print(\"mihoyo:ws被踢下线\")\n pkoff = PKickOff().parse(reply[32:])\n print(\"mihoyo:\" + pkoff.reason)\n break\n elif biztype == 52:\n print(\"mihoyo:ws服务关机\")\n break\n elif biztype == 6:\n heart_reply = PHeartBeatReply().parse(reply[32:])\n if heart_reply.code != 0:\n print(\"mihoyo:ws心跳失败\")\n break\n elif biztype == 30001: # 正常处理\n evt = RobotEvent().parse(reply[32:]).to_dict()\n asyncio.create_task(self._event_deal(evt))\n\n async def _ws_server(self) -> None:\n while not self._is_stop:\n try:\n await self._ws_connect()\n except:\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n traceback.print_exc()\n await asyncio.sleep(3)\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n\n async def init_after(self) -> None:\n asyncio.create_task(self._ws_server())\n\n def _mihoyo_msg_to_satori(self,content_obj)->str:\n ret = \"\"\n entities = content_obj[\"content\"][\"entities\"]\n text = content_obj[\"content\"][\"text\"]\n l = len(text)\n i = 0\n while i < l:\n for en in entities:\n if en[\"offset\"] == i:\n print(en)\n i += en[\"length\"]\n if en[\"entity\"][\"type\"] == \"mention_all\": # 实际上收不到\n ret += \"<at type=\\\"all\\\"/>\"\n elif en[\"entity\"][\"type\"] == \"mentioned_robot\":\n ret += \"<at id=\\\"{}\\\"/>\".format(en[\"entity\"][\"bot_id\"])\n elif en[\"entity\"][\"type\"] == \"mentioned_user\":\n ret += \"<at id=\\\"{}\\\"/>\".format(en[\"entity\"][\"user_id\"])\n break\n else:\n ret += satori_to_plain(text[i])\n i += 1\n return ret\n async def _deal_group_message_event(self,data):\n extendData = data[\"extendData\"]\n\n sendMessage = extendData[\"sendMessage\"]\n user_id = sendMessage[\"fromUserId\"]\n villaId = sendMessage[\"villaId\"]\n roomId = sendMessage[\"roomId\"]\n\n villaRoomId = villaId + \"_\" + roomId\n\n content_obj = json.loads(sendMessage[\"content\"])\n\n extra_obj = json.loads(content_obj[\"user\"][\"extra\"])\n\n satori_msg = self._mihoyo_msg_to_satori(content_obj) # todo\n\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=int(data[\"sendAt\"]) * 1000,\n platform=\"mihoyo\",\n channel=SatoriChannel(\n id=villaRoomId,\n type=SatoriChannel.ChannelType.TEXT,\n ),\n message=SatoriMessage(\n id=data[\"id\"],\n content=satori_msg,\n 
created_at=int(sendMessage[\"sendAt\"])\n ),\n user=SatoriUser(\n id=user_id,\n name=sendMessage[\"nickname\"],\n avatar=content_obj[\"user\"][\"portraitUri\"]\n ),\n member=SatoriGuildMember(\n nick=sendMessage[\"nickname\"],\n avatar=content_obj[\"user\"][\"portraitUri\"]\n ),\n guild=SatoriGuild(\n id=villaId\n ),\n role=SatoriGuildRole(\n id=extra_obj[\"member_roles\"][\"name\"],\n name=extra_obj[\"member_roles\"][\"name\"]\n )\n )\n self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _event_deal(self,data:dict):\n try:\n event_type = data[\"type\"]\n if event_type == \"SendMessage\":\n await self._deal_group_message_event(data)\n except:\n print(traceback.format_exc())\n\n \n async def _api_call(self,path,data = None,villa_id = 0) -> dict:\n url:str = self._http_url + path\n headers = {\"x-rpc-bot_id\":self._self_id,\"x-rpc-bot_secret\":self._secret}\n if villa_id == 0:\n headers[\"x-rpc-bot_villa_id\"] = self._villa_id\n else:\n headers[\"x-rpc-bot_villa_id\"] = villa_id\n if data == None:\n async with httpx.AsyncClient() as client:\n return (await client.get(url,headers=headers)).json()[\"data\"]\n else:\n headers[\"Content-Type\"] = \"application/json\"\n async with httpx.AsyncClient() as client:\n ret = (await client.post(url,headers=headers,data=data)).json()\n if ret[\"retcode\"] != 0:\n print(\"mihoyo:\",ret)\n return ret[\"data\"]\n\n \n async def _satori_to_mihoyo(self,satori_obj,villa_id) -> [dict]:\n to_send_data = []\n last_type = 1\n for node in satori_obj:\n if isinstance(node,str):\n text = node\n if last_type == 1 and len(to_send_data) != 0:\n l = len(to_send_data)\n to_send_data[l - 1][\"text\"] += text\n else:\n to_send_data.append({\n \"type\":1,\n \"text\":text,\n \"entities\":[]\n })\n last_type = 1\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n text = \"@全体成员\"\n elif id != None:\n text = \"@\" + id\n else:\n continue\n\n if last_type != 1 or len(to_send_data) == 0:\n to_send_data.append({\n \"type\":1,\n \"text\":\"\",\n \"entities\":[]\n })\n last_type = 1\n\n l = len(to_send_data)\n ll = len(to_send_data[l - 1][\"text\"])\n to_send_data[l - 1][\"text\"] += text\n if type == \"all\":\n to_send_data[l - 1][\"entities\"].append({\n \"entity\": {\n \"type\": \"mention_all\"\n },\n \"length\":5,\n \"offset\":ll\n })\n else:\n if id.startswith(\"bot_\"):\n to_send_data[l - 1][\"entities\"].append({\n \"entity\": {\n \"type\": \"mentioned_robot\",\n \"bot_id\": id\n },\n \"length\":len(id) + 1,\n \"offset\":ll\n })\n else:\n to_send_data[l - 1][\"entities\"].append({\n \"entity\": {\n \"type\": \"mentioned_user\",\n \"user_id\": id\n },\n \"length\":len(id) + 1,\n \"offset\":ll\n })\n\n elif node[\"type\"] == \"img\":\n img_url:str = node[\"attrs\"][\"src\"]\n mihoyo_img_url = \"\"\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_content = base64.b64decode(img_url[base64_start + 7:])\n else:\n async with httpx.AsyncClient() as client:\n img_content = (await client.get(img_url)).content\n ext = imghdr.what(file = \"\",h=img_content)\n m = hashlib.md5()\n m.update(img_content)\n headers = {\"x-rpc-bot_id\":self._self_id,\"x-rpc-bot_secret\":self._secret,\"x-rpc-bot_villa_id\":villa_id}\n upload_info_url = self._http_url + \"/vila/api/bot/platform/getUploadImageParams\"\n async with httpx.AsyncClient() as client:\n req = client.build_request(\"GET\",upload_info_url,json={\n 
\"md5\":m.hexdigest(),\n \"ext\":ext\n },headers=headers)\n file_params = (await client.send(req)).json()[\"data\"][\"params\"]\n files = {\n \"x:extra\":file_params[\"callback_var\"][\"x:extra\"],\n \"OSSAccessKeyId\":file_params[\"accessid\"],\n \"signature\":file_params[\"signature\"],\n \"success_action_status\":file_params[\"success_action_status\"],\n \"name\":file_params[\"name\"],\n \"callback\":file_params[\"callback\"],\n \"x-oss-content-type\":file_params[\"x_oss_content_type\"],\n \"key\":file_params[\"key\"],\n \"policy\":file_params[\"policy\"],\n \"Content-Disposition\":file_params[\"content_disposition\"],\n 'file':('test',img_content)\n }\n async with httpx.AsyncClient() as client:\n ret = (await client.post(file_params[\"host\"],files=files)).json()\n mihoyo_img_url = ret[\"data\"][\"url\"]\n to_send_data.append({\n \"type\":2,\n \"url\":mihoyo_img_url,\n })\n last_type = 2\n to_send_data2 = []\n for it in to_send_data:\n type = it[\"type\"]\n if type == 1:\n to_send_data2.append({\n \"object_name\":\"MHY:Text\",\n \"msg_content\":json.dumps({\n \"content\":{\n \"text\":it[\"text\"],\n \"entities\":it[\"entities\"]\n }\n })})\n elif type == 2:\n to_send_data2.append({\n \"object_name\":\"MHY:Image\",\n \"msg_content\":json.dumps({\n \"content\":{\n \"url\":it[\"url\"]\n }\n \n })})\n \n return to_send_data2\n \n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n villa_id = channel_id.split(\"_\")[0]\n satori_obj = parse_satori_html(content)\n to_sends = await self._satori_to_mihoyo(satori_obj,villa_id)\n to_ret = []\n # print(to_sends)\n for it in to_sends:\n it[\"room_id\"] = channel_id.split(\"_\")[1]\n ret = await self._api_call(\"/vila/api/bot/platform/sendMessage\",json.dumps(it),villa_id=villa_id)\n to_ret.append(SatoriMessage(id=ret[\"bot_msg_id\"],content=\"\").to_dict())\n return to_ret\n \n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n satori_ret = SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=self._self_id,\n is_bot=True\n ),\n self_id=self._self_id,\n platform=\"mihoyo\"\n ).to_dict()\n if platform == None and self_id == None:\n return [satori_ret]\n else:\n return satori_ret\n\n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n url = self._http_url + \"/vila/api/bot/platform/getMember\"\n headers = {\"x-rpc-bot_id\":self._self_id,\"x-rpc-bot_secret\":self._secret,\"x-rpc-bot_villa_id\":guild_id}\n async with httpx.AsyncClient() as client:\n req = client.build_request(\"GET\",url,json={\n \"uid\":user_id\n },headers=headers)\n obret = (await client.send(req)).json()[\"data\"][\"member\"]\n satori_ret = SatoriGuildMember(\n user=SatoriUser(\n id=obret[\"basic\"][\"uid\"],\n name=obret[\"basic\"][\"nickname\"],\n avatar=obret[\"basic\"][\"avatar_url\"],\n is_bot=False\n ),\n nick=obret[\"basic\"][\"nickname\"],\n avatar=obret[\"basic\"][\"avatar_url\"],\n joined_at=int(obret[\"joined_at\"] + \"000\")\n ).to_dict()\n return satori_ret" }, { "identifier": "AdapterOnebot", "path": "onebot_adapter.py", "snippet": "class AdapterOnebot:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._http_url = config[\"http_url\"]\n self._ws_url = config[\"ws_url\"]\n if \"access_token\" in config:\n self._access_token = config[\"access_token\"]\n else:\n self._access_token = 
None\n self._is_stop = False\n self._login_status = 3 # DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n\n def _cqarr_to_satori(self,cqarr):\n ret = \"\"\n for node in cqarr:\n if node[\"type\"] == \"text\":\n ret += satori_to_plain(node[\"data\"][\"text\"])\n elif node[\"type\"] == \"at\":\n qq = node[\"data\"][\"qq\"]\n if qq == \"all\":\n ret += \"<at type=\\\"all\\\"/>\"\n else:\n ret += \"<at id={}/>\".format(json.dumps(qq))\n elif node[\"type\"] == \"image\":\n url = node[\"data\"][\"url\"]\n ret += \"<img src={}/>\".format(json.dumps(url))\n return ret\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n\n async def init_after(self) -> None:\n '''适配器创建之后会调用一次,应该在这里进行ws连接等操作,如果不需要,可以不写'''\n async def _ws_server(self:AdapterOnebot) -> None:\n while not self._is_stop:\n try:\n self._login_status = 2 # CONNECT\n async with connect(self._ws_url) as websocket:\n print(\"onebot:ws已经连接\")\n self._login_status = 1 # ONLINE\n try:\n while True:\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n await self._event_deal(json.loads(reply))\n except asyncio.TimeoutError:\n if self._is_stop:\n await websocket.close()\n except asyncio.QueueFull:\n print(\"队列满\")\n except Exception as e:\n print(e) \n except Exception as e:\n print(e)\n print(\"onebot:ws连接已经断开\")\n self._login_status = 3 # DISCONNECT\n asyncio.create_task(_ws_server(self))\n \n async def _event_deal(self,evt:dict):\n '''自己定义的事件转化函数'''\n post_type = evt[\"post_type\"]\n if post_type == \"message\":\n message_type = evt[\"message_type\"]\n sender = evt[\"sender\"]\n if message_type == \"group\":\n channel_obj = {\n \"id\":\"GROUP_\"+str(evt[\"group_id\"]),\n \"type\":0,\n \"name\":None,\n \"parent_id\":None\n }\n guild_obj = {\n \"id\":\"GROUP_\"+str(evt[\"group_id\"]),\n \"name\":None,\n \"avatar\":None\n }\n user_obj = {\n \"id\":str(evt[\"user_id\"]),\n \"name\":get_json_or(sender,\"nickname\",None),\n \"nick\":get_json_or(sender,\"nickname\",None),\n \"avatar\":get_json_or(sender,\"avatar\",None),\n \"is_bot\":None\n }\n joined_at = get_json_or(sender,\"join_time\",None)\n if joined_at:\n joined_at = int(str(joined_at) + \"000\")\n member_obj = {\n \"nick\":get_json_or(sender,\"card\",None),\n \"avatar\":get_json_or(sender,\"avatar\",None),\n \"joined_at\":joined_at\n }\n message_obj = {\n \"id\":str(evt[\"message_id\"]),\n \"content\":self._cqarr_to_satori(_cqmsg_to_arr(evt[\"message\"])),\n \"created_at\":int(str(evt[\"time\"] ) + \"000\")\n }\n role_obj = {\n \"id\":get_json_or(sender, \"role\",\"member\"),\n \"name\":get_json_or(sender,\"role\",\"member\")\n }\n satori_evt = {\n \"id\":self._id,\n \"type\":\"message-created\",\n \"platform\":\"onebot\",\n \"self_id\":str(evt[\"self_id\"]),\n \"timestamp\":int(str(evt[\"time\"] ) + \"000\"),\n \"channel\":channel_obj,\n \"guild\":guild_obj,\n \"member\":member_obj,\n \"message\":message_obj,\n \"role\":role_obj,\n \"user\":user_obj\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n elif message_type == \"private\":\n channel_obj = {\n 
\"id\":str(evt[\"user_id\"]),\n \"type\":1,\n \"name\":None,\n \"parent_id\":None\n }\n user_obj = {\n \"id\":str(evt[\"user_id\"]),\n \"name\":get_json_or(sender,\"nickname\",None),\n \"nick\":get_json_or(sender,\"nickname\",None),\n \"avatar\":get_json_or(sender,\"avatar\",None),\n \"is_bot\":None\n }\n joined_at = get_json_or(sender,\"join_time\",None)\n if joined_at:\n joined_at = int(str(joined_at) + \"000\")\n message_obj = {\n \"id\":str(evt[\"message_id\"]),\n \"content\":self._cqarr_to_satori(_cqmsg_to_arr(evt[\"message\"])),\n \"created_at\":int(str(evt[\"time\"] ) + \"000\")\n }\n satori_evt = {\n \"id\":self._id,\n \"type\":\"message-created\",\n \"platform\":\"onebot\",\n \"self_id\":str(evt[\"self_id\"]),\n \"timestamp\":int(str(evt[\"time\"] ) + \"000\"),\n \"channel\":channel_obj,\n \"message\":message_obj,\n \"user\":user_obj\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n elif post_type == \"notice\":\n notice_type = evt[\"notice_type\"]\n if notice_type == \"group_increase\":\n guild_obj = {\n \"id\":\"GROUP_\"+str(evt[\"group_id\"]),\n \"name\":None,\n \"avatar\":None\n }\n member_obj = {\n \"nick\":None,\n \"avatar\":get_json_or(evt,\"avatar\",None),\n \"joined_at\":int(str(evt[\"time\"] ) + \"000\")\n }\n user_obj = {\n \"id\":str(evt[\"user_id\"]),\n \"name\":None,\n \"nick\":None,\n \"avatar\":None,\n \"is_bot\":None\n }\n satori_evt = {\n \"id\":self._id,\n \"type\":\"guild-member-added\",\n \"platform\":\"onebot\",\n \"self_id\":str(evt[\"self_id\"]),\n \"timestamp\":int(str(evt[\"time\"] ) + \"000\"),\n \"guild\":guild_obj,\n \"member\":member_obj,\n \"user\":user_obj\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n\n async def _api_call(self,path,data) -> dict:\n url:str = self._http_url + path\n if self._access_token:\n headers = {\"Authorization\":\"Bearer {}\".format(self._access_token)}\n else:\n headers = {}\n async with httpx.AsyncClient() as client:\n # headers[\"Content-Type\"] = \"application/json\"\n return (await client.post(url,headers=headers,data=data)).json()\n \n async def _satori_to_cq(self,satori_obj) -> str:\n ret = \"\"\n for node in satori_obj:\n if isinstance(node,str):\n ret += _cq_text_encode(node)\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n ret += \"[CQ:at,qq=all]\"\n elif id != None:\n ret += \"[CQ:at,qq={}]\".format(_cq_params_encode(id))\n elif node[\"type\"] == \"img\":\n img_url = node[\"attrs\"][\"src\"]\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_url = \"base64://\" + img_url[base64_start + 7:]\n ret += \"[CQ:image,file={}]\".format(_cq_params_encode(img_url)) \n\n return ret\n\n\n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n satori_obj = parse_satori_html(content)\n to_send = await self._satori_to_cq(satori_obj)\n if channel_id.startswith(\"GROUP_\"):\n group_id = int(channel_id[6:])\n ret = await self._api_call(\"/send_group_msg\",{\"group_id\":group_id,\"message\":to_send})\n return [{\"id\":str(ret[\"data\"][\"message_id\"]),\"content\":\"\"}]\n else:\n user_id = int(channel_id)\n ret = await self._api_call(\"/send_private_msg\",{\"user_id\":user_id,\"message\":to_send})\n return [{\"id\":str(ret[\"data\"][\"message_id\"]),\"content\":\"\"}]\n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n obret = (await 
self._api_call(\"/get_login_info\",{}))[\"data\"]\n satori_ret = {\n \"user\":{\n \"id\":str(obret[\"user_id\"]),\n \"name\":obret[\"nickname\"],\n \"nick\":obret[\"nickname\"],\n \"avatar\":get_json_or(obret,\"avatar\",None),\n \"is_bot\":None\n },\n \"self_id\":str(obret[\"user_id\"]),\n \"platform\":\"onebot\",\n \"status\":self._login_status,\n }\n if platform == None and self_id == None:\n return [satori_ret]\n else:\n return satori_ret\n \n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n obret = (await self._api_call(\"/get_group_member_info\",{\n \"group_id\":int(guild_id[6:]),\n \"user_id\":int(user_id)\n }))[\"data\"]\n joined_at = get_json_or(obret,\"join_time\",None)\n if joined_at:\n joined_at = int(str(joined_at) + \"000\")\n satori_ret = {\n \"user\":{\n \"id\":str(obret[\"user_id\"]),\n \"name\":get_json_or(obret,\"nickname\",None),\n \"nick\":get_json_or(obret,\"card\",None),\n \"avatar\":get_json_or(obret,\"avatar\",None),\n \"is_bot\":None\n },\n \"nick\":get_json_or(obret,\"card\",None),\n \"avatar\":get_json_or(obret,\"avatar\",None),\n \"joined_at\":joined_at,\n }\n return satori_ret" }, { "identifier": "Config", "path": "config.py", "snippet": "class Config:\n def __init__(self) -> None:\n self.botlist:list = []\n self.web_port:int = 8080\n self.web_host:str = \"127.0.0.1\"\n self.access_token:str = \"\"\n \n async def read_config(self):\n async with aiofiles.open('config.json', mode='r') as f:\n json_dat = json5.loads(await f.read())\n self.botlist = json_dat[\"botlist\"]\n self.web_port = json_dat[\"web_port\"]\n self.web_host = json_dat[\"web_host\"]\n self.access_token = json_dat[\"access_token\"]" }, { "identifier": "AdapterQQ", "path": "qq_adapter.py", "snippet": "class AdapterQQ:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._botqq = config[\"botqq\"]\n self._appid = config[\"appid\"]\n self._token = config[\"token\"]\n if \"withgroup\" in config:\n self._withgroup = config[\"withgroup\"]\n else:\n self._withgroup = None\n self._appsecret = config[\"appsecret\"]\n self._http_url = \"https://api.sgroup.qq.com\"\n self._is_stop = False\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n self._sn = None\n self._self_id = None\n self._access_token = None\n self._expires_in = 0\n self.msgid_map = dict()\n # self._self_name = None\n\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n \n\n async def _ws_recv(self,websocket):\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n return reply\n except asyncio.TimeoutError:\n return None\n\n async def _ws_connect(self):\n self._login_status = SatoriLogin.LoginStatus.CONNECT\n ws_url = (await self._api_call(\"/gateway\"))[\"url\"]\n async with connect(ws_url) as websocket:\n tm = time.time()\n while not self._is_stop:\n reply = await self._ws_recv(websocket)\n if not reply:\n now_time = 
time.time()\n if now_time - tm > 30:\n tm = now_time\n await websocket.send(json.dumps({\"op\": 1,\"d\": self._sn}))\n continue\n js = json.loads(reply)\n op = js[\"op\"]\n if op == 0: # 事件\n self._sn = js[\"s\"]\n t = js[\"t\"]\n if t == \"READY\":\n print(\"qq:ws连接成功\")\n print(json.dumps(js))\n self._login_status = SatoriLogin.LoginStatus.ONLINE\n else:\n print(json.dumps(js))\n asyncio.create_task(self._deal_event(js))\n elif op == 1: # 心跳\n await websocket.send(json.dumps({\"op\":11}))\n elif op == 7: # 重连\n print(\"qq:服务端要求重连\")\n break\n elif op == 9: # 参数错误\n print(\"qq:参数错误:\",json.dumps(js))\n break\n elif op == 10: # ws建立成功\n if self._withgroup:\n await websocket.send(json.dumps({\n \"op\":2,\n \"d\":{\n \"token\":\"QQBot {}\".format(self._access_token),\n \"intents\":0 | (1 << 0) | (1 << 1) | (1 << 30) | (1 << 25),\n \"shard\":[0, 1],\n }\n }))\n else:\n await websocket.send(json.dumps({\n \"op\":2,\n \"d\":{\n \"token\":\"QQBot {}\".format(self._access_token),\n \"intents\":0 | (1 << 0) | (1 << 1) | (1 << 30),\n \"shard\":[0, 1],\n }\n }))\n elif op == 11: # HTTP Callback ACK\n pass\n\n async def _ws_server(self) -> None:\n while not self._is_stop:\n try:\n await self._ws_connect()\n except:\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n print(traceback.format_exc())\n await asyncio.sleep(3)\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n\n async def _token_refresh(self):\n async with httpx.AsyncClient() as client:\n if not self._expires_in or int(self._expires_in) < 60 * 5:\n url = \"https://bots.qq.com/app/getAppAccessToken\"\n ret = (await client.post(url,json={\n \"appId\":self._appid,\n \"clientSecret\":self._appsecret\n })).json()\n self._access_token = ret[\"access_token\"]\n self._expires_in = ret[\"expires_in\"]\n # print(ret)\n\n async def _qqarr_to_satori(self,qqmsg_arr):\n ret = \"\"\n for it in qqmsg_arr:\n if it[\"type\"] == \"text\":\n ret += satori_to_plain(it[\"data\"])\n else:\n if it[\"data\"].startswith(\"<@!\"):\n user_id = it[\"data\"][3:len(it[\"data\"]) - 1]\n ret += \"<at id=\\\"{}\\\">\".format(satori_to_plain(user_id))\n elif it[\"data\"].startswith(\"<@\"):\n user_id = it[\"data\"][2:len(it[\"data\"]) - 1]\n ret += \"<at id=\\\"{}\\\">\".format(satori_to_plain(user_id))\n return ret\n \n async def _deal_channel_event(self,data):\n qqmsg_arr = _qqmsg_to_arr(data[\"content\"])\n # print(\"qqmsg_arr\",qqmsg_arr)\n satori_msg = await self._qqarr_to_satori(qqmsg_arr)\n self.msgid_map[\"CHANNEL_\"+data[\"channel_id\"]] = data[\"id\"]\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000,\n platform=\"qq_guild\",\n channel=SatoriChannel(\n id=\"CHANNEL_\"+data[\"channel_id\"],\n type=SatoriChannel.ChannelType.TEXT,\n ),\n message=SatoriMessage(\n id=data[\"id\"],\n content=satori_msg,\n created_at=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ),\n user=SatoriUser(\n id=data[\"author\"][\"id\"],\n name=data[\"author\"][\"username\"],\n avatar=data[\"author\"][\"avatar\"],\n is_bot=data[\"author\"][\"bot\"]\n ),\n member=SatoriGuildMember(\n nick=data[\"member\"][\"nick\"],\n avatar=data[\"author\"][\"avatar\"],\n joined_at=int(time.mktime(time.strptime(data[\"member\"][\"joined_at\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ),\n guild=SatoriGuild(\n id=data[\"guild_id\"]\n ),\n role=SatoriGuildRole(\n id=json.dumps(sorted(data[\"member\"][\"roles\"]))\n )\n )\n 
self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _deal_group_event(self,data):\n qqmsg_arr = _qqmsg_to_arr(data[\"content\"])\n # print(\"qqmsg_arr\",qqmsg_arr)\n satori_msg = await self._qqarr_to_satori(qqmsg_arr)\n self.msgid_map[\"GROUP_\"+data[\"group_id\"]] = data[\"id\"]\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._botqq,\n timestamp=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000,\n platform=\"qq_group\",\n channel=SatoriChannel(\n id=\"GROUP_\"+data[\"group_id\"],\n type=SatoriChannel.ChannelType.TEXT,\n ),\n message=SatoriMessage(\n id=data[\"id\"],\n content=satori_msg,\n created_at=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ),\n user=SatoriUser(\n id=data[\"author\"][\"id\"]\n ),\n member=SatoriGuildMember(\n ),\n guild=SatoriGuild(\n id=\"GROUP_\"+data[\"group_id\"]\n ),\n role=SatoriGuildRole(\n id=\"unkonw\",\n name=\"unkonw\"\n )\n )\n self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _deal_event(self,event):\n try:\n type = event[\"t\"]\n if type == \"AT_MESSAGE_CREATE\":\n d = event[\"d\"]\n if (\"channel_id\" in d) and d[\"channel_id\"]:\n await self._deal_channel_event(d)\n else:\n if type == \"GROUP_AT_MESSAGE_CREATE\":\n d = event[\"d\"]\n if (\"group_id\" in d) and d[\"group_id\"]:\n await self._deal_group_event(d)\n except:\n print(traceback.format_exc())\n\n async def _token_refresh_task(self):\n while True:\n try:\n await self._token_refresh()\n index = 0\n while index < 60: # 每60秒检测一次token是否过期\n await asyncio.sleep(1)\n if self._is_stop:\n break\n index += 1\n if self._is_stop:break\n except:\n print(traceback.format_exc())\n\n async def init_after(self) -> None:\n '''适配器创建之后会调用一次,应该在这里进行ws连接等操作,如果不需要,可以不写'''\n try:\n await self._token_refresh()\n except:\n print(traceback.format_exc())\n asyncio.create_task(self._token_refresh_task())\n asyncio.create_task(self._ws_server())\n\n async def _api_call(self,path,data = None) -> dict:\n url:str = self._http_url + path\n headers = {\"Authorization\":\"QQBot {}\".format(self._access_token),\"X-Union-Appid\":self._appid}\n if data == None:\n async with httpx.AsyncClient() as client:\n return (await client.get(url,headers=headers)).json()\n else:\n async with httpx.AsyncClient() as client:\n ret = (await client.post(url,headers=headers,json=data))\n # print(ret.content)\n return ret.json()\n\n def _make_qq_text(self,text:str):\n ret = text\n ret = ret.replace(\"&\",\"&amp;\")\n ret = ret.replace(\"<\",\"&lt;\")\n ret = ret.replace(\">\",\"&gt;\")\n return ret\n \n async def _satori_to_qq(self,satori_obj,platform = \"qq_guild\") -> [dict]:\n to_reply_id = None\n ret_text = \"\"\n ret_img = []\n for node in satori_obj:\n if isinstance(node,str):\n text = self._make_qq_text(node)\n ret_text += text\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n # 注意,机器人不支持at all,不能发,也不能收,这里假装at all了\n ret_text += \"@全体成员\"\n # text = \"<@everyone>\"\n elif id != None:\n ret_text += \"<@{}>\".format(self._make_qq_text(id))\n elif node[\"type\"] == \"img\":\n img_url:str = node[\"attrs\"][\"src\"]\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_content = base64.b64decode(img_url[base64_start + 7:])\n ret_img.append(img_content)\n else:\n if platform == \"qq_guild\":\n async with httpx.AsyncClient() as client:\n img_content = 
(await client.get(img_url)).content\n ret_img.append(img_content)\n else:\n ret_img.append(img_url)\n elif node[\"type\"] == \"passive\":\n to_reply_id = node[\"attrs\"][\"id\"]\n \n ret_vec = []\n ret_vec.append({\n \"content\":ret_text,\n \"file_image\":None,\n \"to_reply_id\":to_reply_id\n })\n if len(ret_img) != 0:\n ret_vec[0][\"file_image\"] = ret_img[0]\n for img in ret_img[1:]:\n ret_vec.append({\n \"content\":\"\",\n \"file_image\":img,\n \"to_reply_id\":to_reply_id\n })\n return ret_vec\n \n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n to_reply_id = self.msgid_map[channel_id]\n satori_obj = parse_satori_html(content)\n to_sends = await self._satori_to_qq(satori_obj,platform)\n # print(to_sends)\n if channel_id.startswith(\"CHANNEL_\") and platform == \"qq_guild\":\n channel_id = channel_id[8:]\n to_ret = []\n for it in to_sends:\n if it[\"to_reply_id\"]:to_reply_id = it[\"to_reply_id\"]\n async with httpx.AsyncClient() as client:\n headers = {\"Authorization\":\"QQBot {}\".format(self._access_token),\"X-Union-Appid\":self._appid,\"Accept\":\"application/json\"}\n url:str = self._http_url + \"/channels/{}/messages\".format(channel_id)\n data = {\n \"msg_id\":to_reply_id,\n \"content\":it[\"content\"]\n }\n if it[\"file_image\"]:\n ret = (await client.post(url,headers=headers,data=data,files={\"file_image\":it[\"file_image\"]})).json()\n else:\n ret = (await client.post(url,headers=headers,json=data)).json()\n # print(ret)\n to_ret.append(SatoriMessage(id=ret[\"id\"],content=\"\").to_dict())\n return to_ret\n elif channel_id.startswith(\"GROUP_\") and platform == \"qq_group\":\n channel_id = channel_id[6:]\n to_ret = []\n msg_seq = 1\n for it in to_sends:\n if it[\"to_reply_id\"]:to_reply_id = it[\"to_reply_id\"]\n async with httpx.AsyncClient() as client:\n headers = {\"Authorization\":\"QQBot {}\".format(self._access_token),\"X-Union-Appid\":self._appid,\"Accept\":\"application/json\"}\n url:str = self._http_url + \"/v2/groups/{}/messages\".format(channel_id)\n data = {\n \"msg_id\":to_reply_id,\n \"content\":it[\"content\"],\n \"msg_type\":0,\n \"msg_seq\":msg_seq,\n # \"image\": 目前暂不支持\n }\n msg_seq += 1\n ret = (await client.post(url,headers=headers,json=data)).json()\n # print(ret)\n to_ret.append(SatoriMessage(id=ret[\"msg_id\"],content=\"\").to_dict())\n return to_ret\n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n\n if platform == \"qq_group\":\n return SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=self._botqq,\n is_bot=True\n ),\n self_id=self._botqq,\n platform=\"qq_group\"\n ).to_dict()\n else: \n obret = (await self._api_call(\"/users/@me\"))\n satori_ret = SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=obret[\"id\"],\n name=obret[\"username\"],\n avatar=obret[\"avatar\"],\n is_bot=True\n ),\n self_id=obret[\"id\"],\n platform=\"qq_guild\"\n ).to_dict()\n self._self_id = obret[\"id\"]\n if platform == \"qq_guild\":\n return satori_ret\n elif platform == None:\n if not self._withgroup:\n return [satori_ret]\n else:\n return [satori_ret,SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=self._botqq,\n is_bot=True\n ),\n self_id=self._botqq,\n platform=\"qq_group\"\n ).to_dict()]\n \n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n if platform == \"qq_guild\":\n url = 
\"/guilds/{}/members/{}\".format(guild_id,user_id)\n obret = (await self._api_call(url))\n satori_ret = SatoriGuildMember(\n user=SatoriUser(\n id=obret[\"user\"][\"id\"],\n name=obret[\"user\"][\"username\"],\n avatar=obret[\"user\"][\"avatar\"],\n is_bot=obret[\"user\"][\"bot\"]\n ),\n nick=get_json_or(obret,\"nick\",None),\n avatar=obret[\"user\"][\"avatar\"],\n joined_at=int(time.mktime(time.strptime(obret[\"joined_at\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ).to_dict()\n return satori_ret" }, { "identifier": "remove_json_null", "path": "tool.py", "snippet": "def remove_json_null(js) -> dict:\n '''将json中的None字段删除'''\n if isinstance(js,dict):\n st = {}\n for key in js:\n if js[key] != None:\n st[key] = remove_json_null(js[key])\n return st\n elif isinstance(js,list):\n lst = []\n for it in js:\n lst.append(remove_json_null(it))\n return lst\n else:\n return js" } ]
import asyncio import aiohttp import json import uuid from kook_adapter import AdapterKook from mihoyo_adapter import AdapterMihoyo from onebot_adapter import AdapterOnebot from config import Config from aiohttp import web from qq_adapter import AdapterQQ from tool import remove_json_null
17,914
return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/guild.member.get": body = await request.json() ret = await adapter.get_guild_member(platform,self_id,body["guild_id"],body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/message.create": body = await request.json() ret = await adapter.create_message(platform,self_id,body["channel_id"],body["content"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/channel.list": body = await request.json() ret = await adapter.get_channel_list(platform,self_id,body["guild_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/user.get": body = await request.json() ret = await adapter.get_user(platform,self_id,body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_admin(self,request:web.Request): print("----http admin",request) '''在这里处理管理api调用''' # 鉴权 if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path if method == "/v1/admin/login.list": ret = [] for adapter in self.adapterlist: ret += await adapter["adapter"].get_login(None,None) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_foo(self,request:web.Request): '''在这里处理其余任何api调用''' print("--------http other",request) return web.Response(text="method not found") async def _handle_events_ws(self,request:web.Request): '''在这里处理websocket''' ws_id = str(uuid.uuid4()) ws = web.WebSocketResponse() ws.can_prepare(request) await ws.prepare(request) self.wsmap[ws_id] = { "ws":ws, "is_access":False } print("--------http ws",request,ws_id) try: async for msg in ws: if msg.type == aiohttp.WSMsgType.TEXT: data_json = json.loads(msg.data) print("--------recv_ws",json.dumps(msg.data)) op = data_json["op"] if op == 3: if self._config.access_token != "": if data_json["body"]["token"] != self._config.access_token: raise "token err" self.wsmap[ws_id]["is_access"] = True async def get_logins(self,ws): logins = [] for adapter in self.adapterlist: logins += await adapter["adapter"].get_login(None,None) await Satori.ws_send_json(ws,{ "op":4, "body":{ "logins":logins } }) asyncio.create_task(get_logins(self,ws)) elif op == 1: async def send_pong(ws): await Satori.ws_send_json(ws,{ "op":2 }) asyncio.create_task(send_pong(ws)) elif msg.type == aiohttp.WSMsgType.ERROR: print('ws connection closed with exception %s' % ws.exception()) finally: del self.wsmap[ws_id] print("--------http ws close",ws_id) return ws async def init_after(self): async def event_loop(self:Satori,adapter:AdapterOnebot): while True: msg = await adapter.get_msg() for wsid in self.wsmap: ws = self.wsmap[wsid] if ws["is_access"]: msg["id"] = self._evt_id asyncio.create_task(Satori.ws_send_json(ws["ws"],{"op":0,"body":msg})) self._evt_id += 1 # 读取配置文件 await self._config.read_config() # 创建 adapter for botcfg in self._config.botlist: if botcfg["platform"] == 
"onebot": adapter = AdapterOnebot(botcfg) elif botcfg["platform"] == "kook": adapter = AdapterKook(botcfg) elif botcfg["platform"] == "mihoyo": adapter = AdapterMihoyo(botcfg) elif botcfg["platform"] == "qq":
class Satori: def __init__(self) -> None: self._config:Config = Config() self.adapterlist = [] self.wsmap = {} self._evt_id = 100 async def _get_adapter(self,platform,self_id): ''' 用于获取适配器 ''' for adapter in self.adapterlist: info = adapter["info"] for bot in info: if self_id == bot["self_id"] and bot["platform"] == platform: return adapter["adapter"] return None async def ws_send_json(ws,js) -> None: js = remove_json_null(js) print("--------ws_send_json",json.dumps(js)) await ws.send_json(js) async def _handle_http_normal(self,request:web.Request): print("----http normal",request) '''在这里处理普通api调用''' # 鉴权 if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path platform = request.headers.get("X-Platform") self_id = request.headers.get("X-Self-ID") adapter:AdapterOnebot = await self._get_adapter(platform,self_id) if adapter == None: return web.Response(text="bot not found") if method == "/v1/login.get": ret = await adapter.get_login(platform,self_id) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/guild.member.get": body = await request.json() ret = await adapter.get_guild_member(platform,self_id,body["guild_id"],body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/message.create": body = await request.json() ret = await adapter.create_message(platform,self_id,body["channel_id"],body["content"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/channel.list": body = await request.json() ret = await adapter.get_channel_list(platform,self_id,body["guild_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/user.get": body = await request.json() ret = await adapter.get_user(platform,self_id,body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_admin(self,request:web.Request): print("----http admin",request) '''在这里处理管理api调用''' # 鉴权 if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path if method == "/v1/admin/login.list": ret = [] for adapter in self.adapterlist: ret += await adapter["adapter"].get_login(None,None) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_foo(self,request:web.Request): '''在这里处理其余任何api调用''' print("--------http other",request) return web.Response(text="method not found") async def _handle_events_ws(self,request:web.Request): '''在这里处理websocket''' ws_id = str(uuid.uuid4()) ws = web.WebSocketResponse() ws.can_prepare(request) await ws.prepare(request) self.wsmap[ws_id] = { "ws":ws, "is_access":False } print("--------http ws",request,ws_id) try: async for msg in ws: if msg.type == aiohttp.WSMsgType.TEXT: data_json = json.loads(msg.data) print("--------recv_ws",json.dumps(msg.data)) op = data_json["op"] if op == 3: if 
self._config.access_token != "": if data_json["body"]["token"] != self._config.access_token: raise "token err" self.wsmap[ws_id]["is_access"] = True async def get_logins(self,ws): logins = [] for adapter in self.adapterlist: logins += await adapter["adapter"].get_login(None,None) await Satori.ws_send_json(ws,{ "op":4, "body":{ "logins":logins } }) asyncio.create_task(get_logins(self,ws)) elif op == 1: async def send_pong(ws): await Satori.ws_send_json(ws,{ "op":2 }) asyncio.create_task(send_pong(ws)) elif msg.type == aiohttp.WSMsgType.ERROR: print('ws connection closed with exception %s' % ws.exception()) finally: del self.wsmap[ws_id] print("--------http ws close",ws_id) return ws async def init_after(self): async def event_loop(self:Satori,adapter:AdapterOnebot): while True: msg = await adapter.get_msg() for wsid in self.wsmap: ws = self.wsmap[wsid] if ws["is_access"]: msg["id"] = self._evt_id asyncio.create_task(Satori.ws_send_json(ws["ws"],{"op":0,"body":msg})) self._evt_id += 1 # 读取配置文件 await self._config.read_config() # 创建 adapter for botcfg in self._config.botlist: if botcfg["platform"] == "onebot": adapter = AdapterOnebot(botcfg) elif botcfg["platform"] == "kook": adapter = AdapterKook(botcfg) elif botcfg["platform"] == "mihoyo": adapter = AdapterMihoyo(botcfg) elif botcfg["platform"] == "qq":
adapter = AdapterQQ(botcfg)
4
2023-12-03 13:53:47+00:00
24k
aliyun/pai-python-sdk
pai/model.py
[ { "identifier": "git_utils", "path": "pai/common/git_utils.py", "snippet": "def git_clone_repo(git_config: Dict[str, str], source_dir: Optional[str] = None):\ndef _validate_git_config(git_config):\ndef _build_and_run_clone_command(git_config, dest_dir):\ndef _clone_command_for_codeup(git_config, dest_dir):\ndef _clone_command_for_github(git_config, dest_dir):\ndef _clone_command_for_ssh(git_config, dest_dir):\ndef _clone_command_for_github_https(git_config, dest_dir):\ndef _clone_command_for_codeup_https(git_config, dest_dir):\ndef _clone_command(repo_url, dest_dir, branch=None):\ndef _update_url_with_token(repo_url, token):\ndef _update_url_with_username_and_password(repo_url, username, password):\ndef _checkout_commit(git_config, dest_dir):" }, { "identifier": "INSTANCE_TYPE_LOCAL_GPU", "path": "pai/common/consts.py", "snippet": "INSTANCE_TYPE_LOCAL_GPU = \"local_gpu\"" }, { "identifier": "ModelFormat", "path": "pai/common/consts.py", "snippet": "class ModelFormat(object):\n SavedModel = \"SavedModel\"\n FrozenPb = \"FrozenPb\"\n KerasH5 = \"KerasH5\"\n CaffePrototxt = \"Caffe\"\n ONNX = \"ONNX\"\n BladeModel = \"BladeModel\"\n PMML = \"PMML\"\n TorchScript = \"TorchScript\"\n TFLite = \"TFLite\"" }, { "identifier": "ContainerRun", "path": "pai/common/docker_utils.py", "snippet": "class ContainerRun(object):\n \"\"\"A class represent a container run in local.\"\"\"\n\n CONTAINER_STATUS_RUNNING = \"running\"\n CONTAINER_STATUS_EXITED = \"exited\"\n CONTAINER_STATUS_PAUSED = \"paused\"\n\n def __init__(self, container, port: Optional[int] = None):\n \"\"\"Initialize a container run.\n\n Args:\n container: A docker container object.\n port (int): The host port that container is exposed to.\n\n \"\"\"\n self.container = container\n self.port = port\n\n @property\n def status(self):\n self.container.reload()\n return self.container.status\n\n def is_running(self):\n \"\"\"Return True if container is running, otherwise False.\"\"\"\n return self.status == self.CONTAINER_STATUS_RUNNING\n\n def is_terminated(self):\n \"\"\"Return True if container is terminated, otherwise False.\"\"\"\n return self.status in [\n self.CONTAINER_STATUS_EXITED,\n self.CONTAINER_STATUS_PAUSED,\n ]\n\n def is_succeeded(self):\n \"\"\"Return True if container is succeeded, otherwise False.\"\"\"\n return (\n self.status == \"exited\" and self.container.attrs[\"State\"][\"ExitCode\"] == 0\n )\n\n def wait_for_ready(self, interval=5):\n \"\"\"Wait until container enter running state or terminated state.\"\"\"\n while True:\n status = self.status\n if status == self.CONTAINER_STATUS_RUNNING:\n break\n elif status in [self.CONTAINER_STATUS_EXITED, self.CONTAINER_STATUS_PAUSED]:\n raise RuntimeError(\n \"Container is terminated : id={} status={}\".format(\n self.container.id, self.container.status\n )\n )\n time.sleep(interval)\n\n def stop(self):\n if self.is_running():\n self.container.stop()\n\n def start(self):\n if not self.is_running():\n self.container.start()\n\n def delete(self):\n if self.is_running():\n self.container.stop()\n self.container.remove()\n\n def watch(self, show_logs: bool = True):\n \"\"\"Watch container log and wait for container to exit.\"\"\"\n if not show_logs:\n self.container.wait()\n else:\n log_iter = self.container.logs(\n stream=True,\n follow=True,\n )\n for log in log_iter:\n print(log.decode())\n\n self.container.reload()\n exit_code = self.container.attrs[\"State\"][\"ExitCode\"]\n if exit_code != 0:\n raise RuntimeError(\n \"Container run exited failed: 
exit_code={}\".format(exit_code)\n )" }, { "identifier": "run_container", "path": "pai/common/docker_utils.py", "snippet": "def run_container(\n image_uri: str,\n container_name: Optional[str] = None,\n port: Optional[int] = None,\n environment_variables: Optional[Dict[str, str]] = None,\n command: Optional[Union[List[str], str]] = None,\n entry_point: Optional[Union[List[str], str]] = None,\n volumes: Optional[Dict[str, Any]] = None,\n working_dir: Optional[str] = None,\n gpu_count: Optional[int] = None,\n gpu_device_ids: Optional[List[str]] = None,\n gpu_capabilities: Optional[List[List[str]]] = None,\n) -> ContainerRun:\n \"\"\"Run a container in local.\n\n Args:\n image_uri (str): A docker image uri.\n container_name (str, optional): Name of the container.\n port (int, optional): The port to expose.\n environment_variables (Dict[str, str], optional): Environment variables to set\n in the container.\n command (Union[List[str], str], optional): Command to run the container.\n entry_point (Union[List[str], str], optional): Entry point to run the container.\n volumes (Dict[str, Any], optional): Volumes to mount in the container.\n working_dir (str, optional): Working directory in the container.\n gpu_count (int, optional): Number of GPU devices to request. Set to -1 to\n request all available devices.\n To use GPU, set either ``gpu_count`` or ``gpu_device_ids``.\n gpu_device_ids (List[str], optional): List of strings for GPU device IDs,\n corresponding to `NVIDIA_VISIBLE_DEVICES` in the NVIDIA Runtime.\n To use GPU, set either ``gpu_count`` or ``gpu_device_ids``.\n gpu_capabilities (List[List[str]], optional): This parameter corresponds to\n `NVIDIA_DRIVER_CAPABILITIES` in the NVIDIA Runtime. The default value is\n ``[[\"compute\", \"utility\"]]`` if ``gpu_device_ids`` or ``gpu_count`` is set.\n Available capabilities for the NVIDIA driver can be found in\n https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities.\n\n Returns:\n ContainerRun: A ContainerRun object.\n\n \"\"\"\n try:\n import docker\n except ImportError:\n raise ImportError(\"Please install docker first: pip install docker\")\n\n client = docker.from_env()\n # use a random host port.\n host_port = randint(49152, 65535)\n\n if gpu_count or gpu_device_ids or gpu_capabilities:\n if not gpu_capabilities:\n gpu_capabilities = [[\"compute\", \"utility\"]]\n device_requests = [\n docker.types.DeviceRequest(\n count=gpu_count,\n device_ids=gpu_device_ids,\n capabilities=gpu_capabilities,\n )\n ]\n else:\n device_requests = []\n\n container = client.containers.run(\n name=container_name,\n entrypoint=entry_point,\n image=image_uri,\n command=command,\n environment=environment_variables,\n ports={port: host_port} if port else None,\n volumes=volumes,\n working_dir=working_dir,\n detach=True,\n device_requests=device_requests,\n )\n container_run = ContainerRun(\n container=container,\n port=host_port,\n )\n return container_run" }, { "identifier": "OssUriObj", "path": "pai/common/oss_utils.py", "snippet": "class OssUriObj(object):\n \"\"\"A class that represents an OSS URI and provides some convenient methods.\"\"\"\n\n def __init__(self, uri: str):\n \"\"\"Constructor for class OssUriObj.\n\n Args:\n uri (str): A string in OSS URI schema: oss://<bucket_name>[.endpoint]/<path/to/file>,\n endpoint in uri is optional.\n \"\"\"\n if not uri.startswith(\"oss://\"):\n raise ValueError(\n \"Invalid OSS URI schema, please provide a string starts with 'oss://'\"\n )\n bucket_name, object_key, 
endpoint, role_arn = self.parse(uri)\n self.bucket_name = bucket_name\n self.object_key = object_key\n self.endpoint = endpoint\n self.role_arn = role_arn\n\n @classmethod\n def from_bucket_key_endpoint(\n cls, bucket_name: str, object_key: str, endpoint: Optional[str] = None\n ) -> \"OssUriObj\":\n \"\"\"Initialize an OSSUri object from bucket_name, object_key and endpoint.\n\n Args:\n bucket_name (str): The name of the OSS bucket.\n object_key (str): OSS object key/path.\n endpoint (str, optional): Endpoint for the OSS bucket.\n\n Returns:\n OssUriObj: An OssUriObj instance represents the specified OSS object.\n\n \"\"\"\n # OSS object key could not contain leading slashes.\n # Document: https://help.aliyun.com/document_detail/273129.html\n if object_key.startswith(\"/\"):\n logger.warning(\n \"OSS object key should not contain leading slashes, the leading\"\n \" slashes will be removed.\"\n )\n object_key = object_key.lstrip(\"/\")\n\n if endpoint:\n if endpoint.startswith(\"http://\"):\n endpoint = endpoint.lstrip(\"http://\")\n elif endpoint.startswith(\"https://\"):\n endpoint = endpoint.lstrip(\"https://\")\n\n uri = f\"oss://{bucket_name}.{endpoint}/{object_key}\"\n else:\n uri = f\"oss://{bucket_name}/{object_key}\"\n return OssUriObj(uri=uri)\n\n @classmethod\n def parse(cls, oss_uri: str) -> Tuple[str, str, str, str]:\n \"\"\"Parse OSS uri string and returns a tuple of (bucket_name, object_key,\n endpoint, role_arn).\n\n Args:\n oss_uri (str): A string in OSS Uri schema: oss://{bucket_name}.{endpoint}/{object_key}.\n\n Returns:\n Tuple: An tuple of [bucket_name, object_key, endpoint, role_arn].\n\n \"\"\"\n parsed_result = urlparse(oss_uri)\n if parsed_result.scheme != \"oss\":\n raise ValueError(\n \"require OSS uri('oss://[bucket_name]/[object_key]') but \"\n \"given '{}'\".format(oss_uri)\n )\n object_key = parsed_result.path\n if object_key.startswith(\"/\"):\n object_key = object_key[1:]\n\n query = parse_qs(parsed_result.query)\n if \".\" in parsed_result.hostname:\n bucket_name, endpoint = parsed_result.hostname.split(\".\", 1)\n else:\n bucket_name = parsed_result.hostname\n # try to get OSS endpoint from url query.\n if \"endpoint\" in query:\n endpoint = query.get(\"endpoint\")[0]\n elif \"host\" in query:\n endpoint = query.get(\"host\")[0]\n else:\n endpoint = None\n role_arn = query.get(\"role_arn\")[0] if \"role_arn\" in query else None\n\n return bucket_name, object_key, endpoint, role_arn\n\n def get_uri_with_endpoint(self, endpoint: str = None) -> str:\n \"\"\"Get an OSS uri string contains endpoint.\n\n Args:\n endpoint (str): Endpoint of the OSS bucket.\n\n Returns:\n str: An string in OSS uri schema contains endpoint.\n\n \"\"\"\n if not endpoint and not self.endpoint:\n raise ValueError(\"Unknown endpoint for the OSS bucket.\")\n\n return \"oss://{bucket_name}.{endpoint}/{object_key}\".format(\n bucket_name=self.bucket_name,\n endpoint=endpoint or self.endpoint,\n object_key=self.object_key,\n )\n\n def get_dir_uri(self):\n \"\"\"Returns directory in OSS uri string format of the original object.\"\"\"\n _, dirname, _ = self.parse_object_key()\n dir_uri = f\"oss://{self.bucket_name}{dirname}\"\n return dir_uri\n\n @property\n def uri(self) -> str:\n \"\"\"Returns OSS uri in string format.\"\"\"\n return \"oss://{bucket_name}/{object_key}\".format(\n bucket_name=self.bucket_name,\n object_key=self.object_key,\n )\n\n def parse_object_key(self) -> Tuple[bool, str, str]:\n \"\"\"Parse the OSS URI object key, returns a tuple of (is_dir, dir_path, file_name).\n\n 
Returns:\n namedtuple: An tuple of is_dir, dir_path, file_name.\n \"\"\"\n object_key = self.object_key.strip()\n if object_key.endswith(\"/\"):\n is_dir, dir_path, file_name = True, os.path.join(\"/\", object_key), None\n else:\n idx = object_key.rfind(\"/\")\n if idx < 0:\n is_dir, dir_path, file_name = False, \"/\", object_key\n else:\n is_dir, dir_path, file_name = (\n False,\n os.path.join(\"/\", object_key[: idx + 1]),\n object_key[idx + 1 :],\n )\n return is_dir, dir_path, file_name" }, { "identifier": "download", "path": "pai/common/oss_utils.py", "snippet": "def download(\n oss_path: Union[str, OssUriObj],\n local_path: str,\n bucket: Optional[oss2.Bucket] = None,\n un_tar=False,\n):\n \"\"\"Download OSS objects to local path.\n\n Args:\n oss_path (str): Source OSS path, could be a single OSS object or a OSS\n directory.\n local_path (str): Local path used to store the data from OSS.\n bucket (oss2.Bucket, optional): OSS bucket used to store the upload data. If it\n is not provided, OSS bucket of the default session will be used.\n un_tar (bool, optional): Whether to decompress the downloaded data. It is only\n work for `oss_path` point to a single file that has a suffix \"tar.gz\".\n\n Returns:\n str: A local file path for the downloaded data.\n\n \"\"\"\n\n bucket, oss_path = _get_bucket_and_path(bucket, oss_path)\n\n if not bucket.object_exists(oss_path) or oss_path.endswith(\"/\"):\n # The `oss_path` represents a \"directory\" in the OSS bucket, download the\n # objects which object key is prefixed with `oss_path`.\n # Note: `un_tar` is not work while `oss_path` is a directory.\n\n oss_path += \"/\" if not oss_path.endswith(\"/\") else \"\"\n iterator = oss2.ObjectIteratorV2(\n bucket=bucket,\n prefix=oss_path,\n )\n keys = [obj.key for obj in iterator if not obj.key.endswith(\"/\")]\n for key in tqdm(keys, desc=f\"Downloading: {oss_path}\"):\n rel_path = os.path.relpath(key, oss_path)\n dest = os.path.join(local_path, rel_path)\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n _download_with_progress(\n dest,\n object_key=key,\n oss_bucket=bucket,\n )\n return local_path\n else:\n # The `oss_path` represents a single file in OSS bucket.\n if oss_path.endswith(\".tar.gz\") and un_tar:\n # currently, only tar.gz format is supported for un_tar after downloading.\n with tempfile.TemporaryDirectory() as temp_dir:\n target_path = os.path.join(temp_dir, os.path.basename(oss_path))\n _download_with_progress(\n target_path,\n object_key=oss_path,\n oss_bucket=bucket,\n )\n with tarfile.open(name=target_path, mode=\"r\") as t:\n t.extractall(path=local_path)\n\n return local_path\n else:\n os.makedirs(local_path, exist_ok=True)\n dest = os.path.join(local_path, os.path.basename(oss_path))\n _download_with_progress(\n dest,\n object_key=oss_path,\n oss_bucket=bucket,\n )\n\n return dest" }, { "identifier": "is_oss_uri", "path": "pai/common/oss_utils.py", "snippet": "def is_oss_uri(uri: Union[str, bytes]) -> bool:\n \"\"\"Determines whether the given uri is an OSS uri.\n\n Args:\n uri (Union[str, bytes]): A string in OSS URI schema:\n oss://<bucket_name>[.endpoint]/<path/to/file>,\n\n\n Returns:\n bool: True if the given uri is an OSS uri, else False.\n\n \"\"\"\n return bool(uri and isinstance(uri, (str, bytes)) and str(uri).startswith(\"oss://\"))" }, { "identifier": "upload", "path": "pai/common/oss_utils.py", "snippet": "def upload(\n source_path: str,\n oss_path: Union[str, OssUriObj],\n bucket: Optional[oss2.Bucket] = None,\n is_tar: Optional[bool] = False,\n) -> str:\n 
\"\"\"Upload local source file/directory to OSS.\n\n Examples::\n\n # compress and upload local directory `./src/` to OSS\n >>> upload(source_path=\"./src/\", oss_path=\"path/to/file\",\n ... bucket=session.oss_bucket, is_tar=True)\n\n\n Args:\n source_path (str): Source file local path which needs to be uploaded, can be\n a single file or a directory.\n oss_path (Union[str, OssUriObj]): Destination OSS path.\n bucket (oss2.Bucket): OSS bucket used to store the upload data. If it is not\n provided, OSS bucket of the default session will be used.\n is_tar (bool): Whether to compress the file before uploading (default: False).\n\n Returns:\n str: A string in OSS URI format. If the source_path is directory, return the\n OSS URI representing the directory for uploaded data, else then\n returns the OSS URI points to the uploaded file.\n \"\"\"\n\n bucket, oss_path = _get_bucket_and_path(bucket, oss_path)\n\n source_path_obj = pathlib.Path(source_path)\n if not source_path_obj.exists():\n raise RuntimeError(\"Source path is not exist: {}\".format(source_path))\n\n if is_tar:\n # compress the local data and upload the compressed source data.\n with tempfile.TemporaryDirectory() as dir_name:\n temp_tar_path = _tar_file(\n source_path, os.path.join(dir_name, \"source.tar.gz\")\n )\n dest_path = (\n os.path.join(oss_path, os.path.basename(temp_tar_path))\n if oss_path.endswith(\"/\")\n else oss_path\n )\n _upload_with_progress(\n filename=temp_tar_path, object_key=dest_path, oss_bucket=bucket\n )\n return \"oss://{}/{}\".format(bucket.bucket_name, dest_path)\n elif not source_path_obj.is_dir():\n # if source path is a file, just invoke bucket.put_object.\n\n # if the oss_path is endswith slash, the file will be uploaded to\n # \"{oss_path}{filename}\", else the file will be uploaded to \"{oss_path}\".\n dest_path = (\n os.path.join(oss_path, os.path.basename(source_path))\n if oss_path.endswith(\"/\")\n else oss_path\n )\n _upload_with_progress(\n filename=source_path, object_key=dest_path, oss_bucket=bucket\n )\n return \"oss://{}/{}\".format(bucket.bucket_name, dest_path)\n else:\n # if the source path is a directory, upload all the file under the directory.\n source_files = glob.glob(\n pathname=str(source_path_obj / \"**\"),\n recursive=True,\n )\n if not oss_path.endswith(\"/\"):\n oss_path += \"/\"\n\n files = [f for f in source_files if not os.path.isdir(f)]\n for file_path in files:\n file_path_obj = pathlib.Path(file_path)\n file_relative_path = file_path_obj.relative_to(source_path_obj).as_posix()\n object_key = oss_path + file_relative_path\n _upload_with_progress(\n filename=file_path, object_key=object_key, oss_bucket=bucket\n )\n return \"oss://{}/{}\".format(bucket.bucket_name, oss_path)" }, { "identifier": "generate_repr", "path": "pai/common/utils.py", "snippet": "def generate_repr(repr_obj, *attr_names: str, **kwargs) -> str:\n \"\"\"Generate a string representation of the given object.\n\n Args:\n repr_obj: The object used to generate the string representation.\n attr_names: A list of attribute names to include in the string representation.\n\n Returns:\n str: A string representation of the object.\n\n \"\"\"\n attrs = {name: getattr(repr_obj, name) for name in attr_names}\n attrs.update(kwargs)\n attr_repr = \", \".join([\"{}={}\".format(k, v) for k, v in attrs.items()])\n cls_name = repr_obj.__class__.__name__\n\n return f\"{cls_name}({attr_repr})\"" }, { "identifier": "is_local_run_instance_type", "path": "pai/common/utils.py", "snippet": "def 
is_local_run_instance_type(instance_type: str) -> bool:\n \"\"\"Return True if instance_type is local run instance type.\"\"\"\n return instance_type and instance_type.strip() in [\n INSTANCE_TYPE_LOCAL_GPU,\n INSTANCE_TYPE_LOCAL,\n ]" }, { "identifier": "random_str", "path": "pai/common/utils.py", "snippet": "def random_str(n):\n \"\"\"Random string generation with lower case letters and digits.\n\n Args:\n n: Size of generated random string.\n\n Returns:\n str: generated random string.\n\n \"\"\"\n return \"\".join(\n random.choice(string.ascii_lowercase + string.digits) for _ in range(n)\n )" }, { "identifier": "to_plain_text", "path": "pai/common/utils.py", "snippet": "def to_plain_text(\n input_str: str, allowed_characters=DEFAULT_PLAIN_TEXT_ALLOW_CHARACTERS, repl_ch=\"_\"\n):\n \"\"\"Replace characters in input_str if it is not in allowed_characters.\"\"\"\n return \"\".join([c if c in allowed_characters else repl_ch for c in input_str])" }, { "identifier": "DuplicatedMountException", "path": "pai/exception.py", "snippet": "class DuplicatedMountException(PAIException):\n \"\"\"Raised if a OSS path is mounted twice.\"\"\"" }, { "identifier": "MountPathIsOccupiedException", "path": "pai/exception.py", "snippet": "class MountPathIsOccupiedException(PAIException):\n \"\"\"Raised if target mount path is already used.\"\"\"" }, { "identifier": "ImageInfo", "path": "pai/image.py", "snippet": "class ImageInfo(object):\n \"\"\"This class represents information for an image provided by PAI.\n\n Args:\n image_name (str): The name of the image.\n image_uri (str): The URI of the image.\n framework_name (str): The name of the framework installed in the image.\n framework_version (str, optional): The version of the framework (Default None).\n image_scope (str): The scope of the image, could be 'training', 'inference' or\n 'develop'.\n accelerator_type (str, optional): The type of accelerator. Defaults to None.\n python_version (str, optional): The version of Python. 
Defaults to None.\n \"\"\"\n\n def __repr__(self):\n return (\n \"{}(framework_name={}: framework_version={}: image_scope={}: \"\n \"accelerator_type={}: py_version={})\".format(\n self.__class__.__name__,\n self.framework_name,\n self.framework_version,\n self.image_scope,\n self.accelerator_type,\n self.python_version,\n )\n )\n\n def __init__(\n self,\n image_name: str,\n image_uri: str,\n framework_name: str,\n image_scope: str,\n framework_version: str = None,\n accelerator_type: Optional[str] = None,\n python_version: Optional[str] = None,\n ):\n self.image_name = image_name\n self.image_uri = image_uri\n self.framework_name = framework_name\n self.framework_version = framework_version\n self.accelerator_type = accelerator_type\n self.python_version = python_version\n self.image_scope = image_scope" }, { "identifier": "AsyncPredictor", "path": "pai/predictor.py", "snippet": "class AsyncPredictor(PredictorBase, _ServicePredictorMixin):\n \"\"\"A class that facilitates making predictions to asynchronous prediction service.\n\n Examples::\n\n # Initialize an AsyncPredictor object using the name of a running service.\n async_predictor = AsyncPredictor(service_name=\"example_service\")\n\n # Make a prediction with the service and get the prediction result.\n resp = async_predictor.predict(data=\"YourPredictionData\")\n result = resp.wait()\n\n # Make a prediction with async API.\n import asyncio\n result = asyncio.run(async_predictor.predict_async(data=\"YourPredictionData\"))\n\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n max_workers: Optional[int] = None,\n endpoint_type: str = EndpointType.INTERNET,\n serializer: Optional[SerializerBase] = None,\n session: Optional[Session] = None,\n ):\n \"\"\"Construct a `AsyncPredictor` object using an existing async prediction service.\n\n Args:\n service_name (str): Name of the existing prediction service.\n max_workers (int): The maximum number of threads that can be used to\n execute the given prediction calls.\n endpoint_type (str): Selects the endpoint used by the predictor, which\n should be one of `INTERNET` or `INTRANET`. 
The `INTERNET` endpoint type\n means that the predictor calls the service over a public endpoint, while\n the `INTRANET` endpoint type is over a VPC endpoint.\n serializer (SerializerBase, optional): A serializer object that transforms\n the input Python object for data transmission and deserialize the\n response data to Python object.\n session (Session, optional): A PAI session object used for communicating\n with PAI service.\n \"\"\"\n\n super(AsyncPredictor, self).__init__(\n service_name=service_name,\n session=session or get_default_session(),\n endpoint_type=endpoint_type,\n serializer=serializer,\n )\n self._max_workers = max_workers\n self.executor = ThreadPoolExecutor(max_workers=self._max_workers)\n self._check()\n\n @property\n def max_workers(self):\n return self._max_workers\n\n @max_workers.setter\n def max_workers(self, n: int):\n if hasattr(self, \"executor\"):\n logger.info(\"Waiting for all submitted tasks in the queue to complete...\")\n self.executor.shutdown()\n self._max_workers = n\n self.executor = ThreadPoolExecutor(max_workers=self._max_workers)\n\n def __del__(self):\n \"\"\"wait for all pending tasks to complete before exit.\"\"\"\n if hasattr(self, \"executor\"):\n logger.info(\"Waiting for all pending tasks to complete...\")\n self.executor.shutdown()\n super(AsyncPredictor, self).__del__()\n\n def _check(self):\n config = json.loads(self._service_api_object[\"ServiceConfig\"])\n if config.get(\"metadata\", {}).get(\"type\") != ServiceType.Async:\n logger.warning(\n \"AsyncPredictor is not recommended to make prediction to a standard \"\n \" prediction service.\"\n )\n\n def _get_result(\n self, request_id: str\n ) -> Optional[Tuple[int, Dict[str, str], bytes]]:\n resp = self._send_request(\n method=\"GET\",\n path=_QUEUE_SERVICE_SINK_PATH,\n params={\n \"requestId\": request_id,\n # _raw_ is false because we want to get the encapsulated prediction\n # result in response body.\n \"_raw_\": \"false\",\n },\n )\n logger.debug(\n \"Poll prediction result: request_id=%s status_code=%s, content=%s\",\n request_id,\n resp.status_code,\n resp.content,\n )\n if resp.status_code == 204:\n # Status code 204 means could not find prediction response for the specific\n # request id.\n return\n\n # Raise exception if status code is not 2xx.\n if resp.status_code // 100 != 2:\n raise RuntimeError(\n \"Pulling prediction result failed: status_code={} content={}\".format(\n resp.status_code, resp.content.decode(\"utf-8\")\n )\n )\n return self._parse_encapsulated_response(resp.json()[0])\n\n def _parse_encapsulated_response(self, data) -> Tuple[int, Dict[str, str], bytes]:\n tags = data[\"tags\"]\n # If the status code from prediction service is not 200, a tag with\n # key 'lastCode' will be added to the tags in response.\n status_code = int(tags.get(\"lastCode\", 200))\n data = base64.b64decode(data[\"data\"])\n # currently, headers are not supported in async prediction service.\n headers = dict()\n return status_code, headers, data\n\n async def _get_result_async(\n self, request_id: str\n ) -> Optional[Tuple[int, Dict[str, str], bytes]]:\n resp = await self._send_request_async(\n method=\"GET\",\n path=_QUEUE_SERVICE_SINK_PATH,\n params={\n \"requestId\": request_id,\n # _raw_ is false because we want to get the encapsulated prediction\n # result in response body.\n \"_raw_\": \"false\",\n },\n )\n status_code = resp.status\n content = await resp.read()\n logger.debug(\n \"Get prediction result: request_id=%s status_code=%s, content=%s\",\n request_id,\n status_code,\n 
content,\n )\n if status_code == 204:\n # Status code 204 means could not find prediction response for the specific\n # request id.\n return\n if status_code // 100 != 2:\n raise RuntimeError(\n \"Pulling prediction result failed: status_code={} content={}\".format(\n status_code, content.decode(\"utf-8\")\n )\n )\n data = (await resp.json())[0]\n return self._parse_encapsulated_response(data)\n\n def _poll_result(\n self, request_id: str, wait_config: WaitConfig\n ) -> Tuple[int, Dict[str, str], bytes]:\n # if max_attempts is negative or zero, then wait forever\n attempts = -1 if wait_config.max_attempts <= 0 else wait_config.max_attempts\n while attempts != 0:\n attempts -= 1\n result = self._get_result(request_id=request_id)\n if not result:\n time.sleep(wait_config.interval)\n continue\n status_code, headers, content = result\n # check real prediction response\n if status_code // 100 != 2:\n raise PredictionException(\n code=status_code,\n message=f\"Prediction failed: status_code={status_code}\"\n f\" content={content.decode()}\",\n )\n return status_code, headers, content\n\n # Polling prediction result timeout.\n raise RuntimeError(\n f\"Polling prediction result timeout: request_id={request_id}, \"\n f\"total_time={wait_config.max_attempts * wait_config.interval}\"\n )\n\n async def _poll_result_async(\n self, request_id, wait_config: WaitConfig\n ) -> Tuple[int, Dict[str, str], bytes]:\n # if max_attempts is negative or zero, then wait forever\n attempts = -1 if wait_config.max_attempts <= 0 else wait_config.max_attempts\n while attempts != 0:\n attempts -= 1\n result = await self._get_result_async(request_id)\n if not result:\n await asyncio.sleep(wait_config.interval)\n continue\n status_code, headers, content = result\n # check real prediction response\n if status_code // 100 != 2:\n raise PredictionException(\n f\"Prediction failed: status_code={status_code} content={content.decode()}\"\n )\n return status_code, headers, content\n\n # Polling prediction result timeout.\n raise RuntimeError(\n f\"Polling prediction result timeout: request_id={request_id}, \"\n f\"total_time={wait_config.max_attempts * wait_config.interval}\"\n )\n\n def _get_request_id(self, resp: requests.models.Response) -> str:\n if resp.status_code // 100 != 2:\n raise RuntimeError(\n f\"Send prediction request failed. status_code={resp.status_code} \"\n f\"message={resp.text}\"\n )\n\n if _QUEUE_SERVICE_REQUEST_ID_HEADER not in resp.headers:\n logger.error(\n f\"Send prediction request failed. Missing request id.\"\n f\" status_code={resp.status_code} content={resp.text}\"\n )\n raise RuntimeError(\"Missing request id in response header.\")\n\n request_id = resp.headers[_QUEUE_SERVICE_REQUEST_ID_HEADER]\n logger.debug(\n f\"Send prediction request successfully. request_id={request_id}\"\n f\" status_code={resp.status_code}\",\n )\n return request_id\n\n async def _get_request_id_async(self, resp: aiohttp.ClientResponse) -> str:\n content = await resp.read()\n if resp.status != 200:\n raise RuntimeError(\n \"Send request to async prediction service failed: status_code={} \"\n \"content={}\".format(resp.status, content.decode(\"utf-8\"))\n )\n\n if _QUEUE_SERVICE_REQUEST_ID_HEADER not in resp.headers:\n logger.error(\n f\"Send prediction request failed. 
Missing request id.\"\n f\" status_code={resp.status} content={content.decode()}\"\n )\n raise RuntimeError(\"Missing request id in response header.\")\n request_id = resp.headers[_QUEUE_SERVICE_REQUEST_ID_HEADER]\n logger.debug(\n f\"Send prediction request successfully. request_id={request_id}\"\n f\" status_code={resp.status}\",\n )\n return request_id\n\n def _predict_fn(\n self,\n data,\n ):\n \"\"\"Make a prediction with the async prediction service.\"\"\"\n # serialize input data\n data = self._handle_input(data)\n resp = self._send_request(data=data)\n request_id = self._get_request_id(resp)\n logger.debug(\"Async prediction RequestId: \", request_id)\n # poll prediction result\n status, headers, content = self._poll_result(\n request_id=request_id, wait_config=WaitConfig()\n )\n\n return self._handle_output(content)\n\n def _wrap_callback_fn(self, cb: Callable):\n \"\"\"Wrap the callback function to handle the prediction result.\"\"\"\n\n @functools.wraps(cb)\n def _(future: Future):\n return cb(future.result())\n\n return _\n\n def predict(\n self,\n data,\n callback: Optional[Union[Callable, List[Callable]]] = None,\n ):\n \"\"\"Make a prediction with the async prediction service.\n\n The input data is serialized using the `serializer.serialize` method before it\n is sent, and the response body is deserialized using the\n `serializer.deserialize` method the prediction result returns.\n\n Args:\n data: The input data for the prediction. It will be serialized using the\n serializer of the predictor before transmitted to the prediction\n service.\n callback (Union[Callable, List[Callable]], optional): A Callback function,\n or a list of callback functions used to process the prediction result.\n\n Returns:\n AsyncTask: The task object that can be used to retrieve the prediction\n result.\n \"\"\"\n self._post_init_serializer()\n future = self.executor.submit(self._predict_fn, data)\n\n if isinstance(callback, Callable):\n callback = [callback]\n\n if callback:\n for cb in callback:\n future.add_done_callback(self._wrap_callback_fn(cb))\n\n return AsyncTask(future=future)\n\n async def predict_async(self, data, wait_config: WaitConfig = WaitConfig()):\n \"\"\"Make a prediction with the async prediction service.\n\n The serializer object for the predictor is responsible for data transformation\n when the 'predict' method is invoked. The input data is serialized using the\n `serializer.serialize` method before it is sent, and the response is\n deserialized using the `serializer.deserialize` method before the prediction\n result returns.\n\n Args:\n data: The input data for the prediction. 
It will be serialized using the\n serializer of the predictor before transmitted to the prediction\n service.\n wait_config (WaitConfig): A config object that controls the behavior of\n polling the prediction result.\n\n Returns:\n Prediction result.\n\n \"\"\"\n self._post_init_serializer()\n data = self._handle_input(data)\n resp = await self._send_request_async(data=data)\n request_id = await self._get_request_id_async(resp)\n\n status_code, headers, content = await self._poll_result_async(\n request_id=request_id, wait_config=wait_config\n )\n return self._handle_output(content)\n\n def _raw_predict_fn(self, data, method, path, headers, **kwargs):\n json_data, data = self._handle_raw_input(data)\n resp = self._send_request(\n path=path,\n json=json_data,\n data=data,\n headers=self._build_headers(headers),\n method=method,\n **kwargs,\n )\n request_id = self._get_request_id(resp)\n status, headers, content = self._poll_result(\n request_id, wait_config=WaitConfig()\n )\n return RawResponse(status, headers, content)\n\n def raw_predict(\n self,\n data: Any = None,\n callback: Optional[Union[Callable, List[Callable], None]] = None,\n method: str = \"POST\",\n path: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n **kwargs,\n ) -> AsyncTask:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n callback (Union[Callable, List[Callable]], optional): A Callback function,\n or a list of callback functions used to process the prediction result.\n path (str, optional): Path for the request to be sent to. If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n **kwargs: Additional keyword arguments for the request.\n Returns:\n AsyncTask: The task object that can be used to retrieve the prediction\n result.\n\n Examples:\n\n from pai.predictor import AsyncPredictor, AsyncTask\n\n predictor = AsyncPredictor()\n task: AsyncTask = predictor.raw_predict(data=\"YourPredictionData\")\n print(task.result())\n\n \"\"\"\n\n future = self.executor.submit(\n self._raw_predict_fn, data, method, path, headers, **kwargs\n )\n cbs = [callback] if isinstance(callback, Callable) else callback\n if cbs:\n for cb in cbs:\n future.add_done_callback(self._wrap_callback_fn(cb))\n\n return AsyncTask(future=future)\n\n async def raw_predict_async(\n self,\n data,\n wait_config: WaitConfig = WaitConfig(),\n method: str = \"POST\",\n headers: Optional[Dict[str, str]] = None,\n path: Optional[str] = None,\n **kwargs,\n ) -> RawResponse:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n wait_config (WaitConfig): A config object that controls the behavior of\n polling the prediction result.\n path (str, optional): Path for the request to be sent to. 
If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n **kwargs: Additional keyword arguments for the request.\n Returns:\n RawResponse: Prediction result.\n\n \"\"\"\n if self.service_status not in ServiceStatus.completed_status():\n self.wait_for_ready()\n json_data, data = self._handle_raw_input(data)\n\n resp = await self._send_request_async(\n data=data,\n method=method,\n json=json_data,\n path=path,\n headers=headers,\n **kwargs,\n )\n request_id = await self._get_request_id_async(resp)\n # Polling the prediction result.\n status_code, headers, content = await self._poll_result_async(\n request_id=request_id, wait_config=wait_config\n )\n return self._handle_raw_output(status_code, headers, content)" }, { "identifier": "LocalPredictor", "path": "pai/predictor.py", "snippet": "class LocalPredictor(PredictorBase):\n \"\"\"Perform prediction to a local service running with docker.\"\"\"\n\n def __init__(\n self,\n port: int,\n container_id: Optional[str] = None,\n serializer: Optional[SerializerBase] = None,\n ):\n \"\"\"LocalPredictor initializer.\n\n Args:\n port (int): The port of the local service.\n container_id (str, optional): The container id of the local service.\n serializer (SerializerBase, optional): A serializer object that transforms.\n \"\"\"\n self.container_id = container_id\n self.port = port\n self.serializer = serializer or JsonSerializer()\n self._container_run = (\n self._build_container_run(container_id, port=port)\n if self.container_id\n else None\n )\n\n @classmethod\n def _build_container_run(cls, container_id, port):\n try:\n import docker\n except ImportError:\n raise ImportError(\"Please install docker first: pip install docker\")\n client = docker.from_env()\n container = client.containers.get(container_id)\n\n return ContainerRun(container=container, port=port)\n\n def predict(self, data) -> Any:\n \"\"\"Perform prediction with the given data.\n\n Args:\n data: The data to be predicted.\n \"\"\"\n request_data = self.serializer.serialize(data=data)\n response = requests.post(\n url=\"http://127.0.0.1:{port}/\".format(port=self._container_run.port),\n data=request_data,\n )\n\n if response.status_code // 100 != 2:\n raise PredictionException(\n code=response.status_code,\n message=response.content,\n )\n\n return self.serializer.deserialize(response.content)\n\n def _build_headers(\n self, headers: Optional[Dict[str, str]] = None\n ) -> Dict[str, str]:\n headers = headers or dict()\n headers[\"User-Agent\"] = http_user_agent(headers.get(\"User-Agent\"))\n return headers\n\n def _build_url(self, path: Optional[str] = None):\n url = \"http://127.0.0.1:{}\".format(self.port)\n if path:\n if path.startswith(\"/\"):\n path = path[1:]\n url = posixpath.join(url, path)\n return url\n\n def raw_predict(\n self,\n data: Any = None,\n path: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n method: str = \"POST\",\n timeout: Optional[Union[float, Tuple[float, float]]] = None,\n **kwargs,\n ) -> RawResponse:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n path (str, optional): Path for the request to be sent to. 
If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n timeout(float, tuple(float, float), optional): Timeout setting for the\n request (Default 10).\n Returns:\n RawResponse: Prediction response from the service.\n\n Raises:\n PredictionException: Raise if status code of the prediction response does\n not equal 2xx.\n \"\"\"\n if isinstance(data, (IOBase, bytes, str)):\n # if data is a file-like object, bytes, or string, it will be sent as\n # request body\n json_data, data = None, data\n else:\n # otherwise, it will be treated as a JSON serializable object and sent as\n # JSON.\n json_data, data = data, None\n header = self._build_headers(headers=headers)\n url = self._build_url(path)\n resp = requests.request(\n url=url,\n json=json_data,\n data=data,\n headers=header,\n method=method,\n timeout=timeout,\n **kwargs,\n )\n resp = RawResponse(\n status_code=resp.status_code,\n content=resp.content,\n headers=dict(resp.headers),\n )\n if resp.status_code // 100 != 2:\n raise PredictionException(resp.status_code, resp.content)\n return resp\n\n def delete_service(self):\n \"\"\"Delete the docker container that running the service.\"\"\"\n if self._container_run:\n self._container_run.stop()\n\n def wait_for_ready(self):\n self._container_run.wait_for_ready()\n # ensure the server is ready.\n self._wait_local_server_ready()\n time.sleep(5)\n\n def _wait_local_server_ready(\n self,\n interval: int = 5,\n ):\n \"\"\"Wait for the local model server to be ready.\"\"\"\n container_run = self._container_run\n while True:\n try:\n # Check whether the container is still running.\n if not container_run.is_running():\n raise RuntimeError(\n \"Container exited unexpectedly, status: {}\".format(\n container_run.status\n )\n )\n\n # Make a HEAD request to the server, just test for connection.\n requests.head(\n f\"http://127.0.0.1:{container_run.port}/\",\n )\n break\n except requests.ConnectionError:\n # ConnectionError means server is not ready.\n logging.debug(\"Waiting for the container to be ready...\")\n time.sleep(interval)\n continue" }, { "identifier": "Predictor", "path": "pai/predictor.py", "snippet": "class Predictor(PredictorBase, _ServicePredictorMixin):\n \"\"\"Predictor is responsible for making prediction to an online service.\n\n The `predictor.predict` method sends the input data to the online prediction service\n and returns the prediction result. The serializer object of the predictor is\n responsible for data transformation when the `predict` method is invoked. 
The input\n data is serialized using the `serializer.serialize` method before it is sent, and\n the response is deserialized using the `serializer.deserialize` method before the\n prediction result returns.\n\n Examples::\n\n # Initialize a predictor object from an existing service using PyTorch\n # processor.\n torch_predictor = Predictor(service_name=\"example_torch_service\")\n result = torch_predictor.predict(numpy.asarray([[22,33,44], [19,22,33]]))\n assert isinstance(result, numpy.ndarray)\n\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n endpoint_type: str = EndpointType.INTERNET,\n serializer: Optional[SerializerBase] = None,\n session: Optional[Session] = None,\n ):\n \"\"\"Construct a `Predictor` object using an existing prediction service.\n\n Args:\n service_name (str): Name of the existing prediction service.\n endpoint_type (str): Selects the endpoint used by the predictor, which\n should be one of `INTERNET` or `INTRANET`. The `INTERNET` endpoint type\n means that the predictor calls the service over a public endpoint, while\n the `INTRANET` endpoint type is over a VPC endpoint.\n serializer (SerializerBase, optional): A serializer object that transforms\n the input Python object for data transmission and deserialize the\n response data to Python object.\n session (Session, optional): A PAI session object used for communicating\n with PAI service.\n \"\"\"\n super(Predictor, self).__init__(\n service_name=service_name,\n session=session or get_default_session(),\n endpoint_type=endpoint_type,\n serializer=serializer,\n )\n self._check()\n\n def _check(self):\n config = json.loads(self._service_api_object[\"ServiceConfig\"])\n if config.get(\"metadata\", {}).get(\"type\") == ServiceType.Async:\n logger.warning(\n \"Predictor is not recommended to make prediction to a async\"\n \" prediction service.\"\n )\n\n def predict(self, data):\n \"\"\"Make a prediction with the online prediction service.\n\n The serializer object for the predictor is responsible for data transformation\n when the 'predict' method is invoked. The input data is serialized using the\n `serializer.serialize` method before it is sent, and the response is\n deserialized using the `serializer.deserialize` method before the prediction\n result returns.\n\n Args:\n data: The input data for the prediction. It will be serialized using the\n serializer of the predictor before transmitted to the prediction\n service.\n\n Returns:\n object: Prediction result.\n\n Raises:\n PredictionException: Raise if status code of the prediction response does\n not equal 2xx.\n \"\"\"\n self._post_init_serializer()\n data = self._handle_input(data)\n resp = self._send_request(\n data,\n )\n if resp.status_code // 100 != 2:\n raise PredictionException(resp.status_code, resp.content)\n return self._handle_output(\n resp.content,\n )\n\n def raw_predict(\n self,\n data: Any = None,\n path: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n method: str = \"POST\",\n timeout: Optional[Union[float, Tuple[float, float]]] = None,\n **kwargs,\n ) -> RawResponse:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n path (str, optional): Path for the request to be sent to. 
If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n timeout(float, tuple(float, float), optional): Timeout setting for the\n request (Default 10).\n **kwargs: Additional keyword arguments for the request.\n Returns:\n RawResponse: Prediction response from the service.\n\n Raises:\n PredictionException: Raise if status code of the prediction response does\n not equal 2xx.\n \"\"\"\n json_data, data = self._handle_raw_input(data)\n resp = self._send_request(\n data=data,\n json=json_data,\n method=method,\n path=path,\n headers=headers,\n timeout=timeout,\n **kwargs,\n )\n if resp.status_code // 100 != 2:\n raise PredictionException(resp.status_code, resp.content)\n\n resp = RawResponse(\n status_code=resp.status_code,\n content=resp.content,\n headers=dict(resp.headers),\n )\n return resp" }, { "identifier": "ServiceType", "path": "pai/predictor.py", "snippet": "class ServiceType(object):\n Standard = \"Standard\"\n Async = \"Async\"" }, { "identifier": "SerializerBase", "path": "pai/serializers.py", "snippet": "class SerializerBase(ABC):\n \"\"\"Abstract class for creating a Serializer class for predictor.\"\"\"\n\n @abstractmethod\n def serialize(self, data) -> bytes:\n \"\"\"Serialize the input data to bytes for transmitting.\"\"\"\n\n @abstractmethod\n def deserialize(self, data: bytes):\n \"\"\"Deserialize the data from raw bytes to Python object .\"\"\"\n\n def inspect_from_service(\n self, service_name: str, *, session: Optional[Session] = None\n ):\n \"\"\"Inspect the online prediction service to complete the serializer instance\n initialization.\n\n The implementation of the `inspect_from_service` method is optional. 
You only\n need to implement it if your serializer requires additional information from\n service metadata or if it needs to send a request to the service in order to\n be initialized.\n\n \"\"\"" }, { "identifier": "Session", "path": "pai/session.py", "snippet": "class Session(ResourceAPIsContainerMixin):\n \"\"\"A class responsible for communicating with PAI services.\"\"\"\n\n def __init__(\n self,\n region_id: str,\n workspace_id: Optional[str] = None,\n credential_config: Optional[CredentialConfig] = None,\n oss_bucket_name: Optional[str] = None,\n oss_endpoint: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"PAI Session Initializer.\n\n Args:\n credential_config (:class:`alibabacloud_credentials.models.Config`, optional):\n The credential config used to access the Alibaba Cloud.\n region_id (str): The ID of the Alibaba Cloud region where the service\n is located.\n workspace_id (str, optional): ID of the workspace used in the default\n session.\n oss_bucket_name (str, optional): The name of the OSS bucket used in the\n session.\n oss_endpoint (str, optional): The endpoint for the OSS bucket.\n \"\"\"\n\n if not region_id:\n raise ValueError(\"Region ID must be provided.\")\n\n self._credential_config = credential_config\n self._region_id = region_id\n self._workspace_id = workspace_id\n self._oss_bucket_name = oss_bucket_name\n self._oss_endpoint = oss_endpoint\n\n header = kwargs.pop(\"header\", None)\n super(Session, self).__init__(header=header)\n\n @property\n def region_id(self) -> str:\n return self._region_id\n\n @property\n def is_inner(self) -> bool:\n return self._region_id in INNER_REGION_IDS\n\n @property\n def oss_bucket_name(self) -> str:\n return self._oss_bucket_name\n\n @property\n def oss_endpoint(self) -> str:\n return self._oss_endpoint\n\n @property\n def credential_config(self) -> CredentialConfig:\n return self._credential_config\n\n @property\n def workspace_name(self):\n if hasattr(self, \"_workspace_name\") and self._workspace_name:\n return self._workspace_name\n\n if not self._workspace_id:\n raise ValueError(\"Workspace id is not set.\")\n workspace_api_obj = self.workspace_api.get(workspace_id=self._workspace_id)\n self._workspace_name = workspace_api_obj[\"WorkspaceName\"]\n return self._workspace_name\n\n @property\n def provider(self) -> str:\n caller_identity = self._acs_sts_client.get_caller_identity().body\n return caller_identity.account_id\n\n @property\n def workspace_id(self) -> str:\n \"\"\"ID of the workspace used by the session.\"\"\"\n return self._workspace_id\n\n @property\n def console_uri(self) -> str:\n \"\"\"The web console URI for PAI service.\"\"\"\n if self.is_inner:\n return \"https://pai-next.alibaba-inc.com\"\n else:\n return \"https://pai.console.aliyun.com/console\"\n\n def _init_oss_config(\n self,\n ):\n \"\"\"Initialize a OssConfig instance.\"\"\"\n if not self._oss_bucket_name:\n # If OSS bucket name is not provided, use the default OSS storage URI\n # that is configured for the workspace.\n default_oss_uri = self.workspace_api.get_default_storage_uri(\n self.workspace_id\n )\n if not default_oss_uri:\n raise RuntimeError(\n \"No default OSS URI is configured for the workspace.\"\n )\n oss_uri_obj = OssUriObj(default_oss_uri)\n self._oss_bucket_name = oss_uri_obj.bucket_name\n\n if not self._oss_endpoint:\n self._oss_endpoint = self._get_default_oss_endpoint()\n\n def _get_oss_auth(self):\n auth = oss2.ProviderAuth(\n credentials_provider=CredentialProviderWrapper(\n config=self._credential_config,\n )\n )\n return auth\n\n 
@property\n def oss_bucket(self):\n \"\"\"A OSS2 bucket instance used by the session.\"\"\"\n if not self._oss_bucket_name or not self._oss_endpoint:\n self._init_oss_config()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n endpoint=self._oss_endpoint,\n bucket_name=self._oss_bucket_name,\n )\n return oss_bucket\n\n def save_config(self, config_path=None):\n \"\"\"Save the configuration of the session to a local file.\"\"\"\n attrs = {key.lstrip(\"_\"): value for key, value in vars(self).items()}\n config = {\n key: value\n for key, value in attrs.items()\n if key in _DEFAULT_CONFIG_KEYS and value is not None\n }\n\n config_path = config_path or DEFAULT_CONFIG_PATH\n os.makedirs(os.path.dirname(config_path), exist_ok=True)\n with open(config_path, \"w\") as f:\n f.write(json.dumps(config, indent=4))\n logger.info(\"Write PAI config succeed: config_path=%s\" % config_path)\n\n def patch_oss_endpoint(self, oss_uri: str):\n oss_uri_obj = OssUriObj(oss_uri)\n if oss_uri_obj.endpoint:\n return oss_uri\n\n # patch endpoint using current OSS bucket endpoint.\n endpoint = self.oss_bucket.endpoint\n if endpoint.startswith(\"http://\"):\n endpoint = endpoint.lstrip(\"http://\")\n elif endpoint.startswith(\"https://\"):\n endpoint = endpoint.lstrip(\"https://\")\n return \"oss://{bucket_name}.{endpoint}/{key}\".format(\n bucket_name=oss_uri_obj.bucket_name,\n endpoint=endpoint,\n key=oss_uri_obj.object_key,\n )\n\n def _get_default_oss_endpoint(self) -> str:\n \"\"\"Returns a default OSS endpoint.\"\"\"\n\n # OSS Endpoint document:\n # https://help.aliyun.com/document_detail/31837.html\n internet_endpoint = \"oss-{}.aliyuncs.com\".format(self.region_id)\n internal_endpoint = \"oss-{}-internal.aliyuncs.com\".format(self.region_id)\n\n return (\n internet_endpoint\n if is_domain_connectable(internal_endpoint)\n else internet_endpoint\n )\n\n def get_oss_bucket(self, bucket_name: str, endpoint: str = None) -> oss2.Bucket:\n \"\"\"Get a OSS bucket using the credentials of the session.\n\n Args:\n bucket_name (str): The name of the bucket.\n endpoint (str): Endpoint of the bucket.\n\n Returns:\n :class:`oss2.Bucket`: A OSS bucket instance.\n\n \"\"\"\n endpoint = endpoint or self._oss_endpoint or self._get_default_oss_endpoint()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n endpoint=endpoint,\n bucket_name=bucket_name,\n )\n return oss_bucket\n\n @classmethod\n def get_storage_path_by_category(\n cls, category: str, dir_name: Optional[str] = None\n ) -> str:\n \"\"\"Get an OSS storage path for the resource.\n\n Args:\n category (str): The category of the resource.\n dir_name (str, optional): The directory name of the resource.\n\n Returns:\n str: A OSS storage path.\n\n \"\"\"\n dir_name = dir_name or datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n storage_path = posixpath.join(\"pai\", category, dir_name).strip()\n\n if not storage_path.endswith(\"/\"):\n storage_path += \"/\"\n return storage_path\n\n def is_supported_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for training.\"\"\"\n instance_generator = make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n return bool(machine_spec)\n\n def is_gpu_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for training.\"\"\"\n instance_generator = 
make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n if not machine_spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for training job. \"\n \"Please provide a supported instance type.\"\n )\n return machine_spec[\"AcceleratorType\"] == \"GPU\"\n\n def is_supported_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n return bool(spec)\n\n def is_gpu_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n\n if not spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for deploying. \"\n \"Please provide a supported instance type.\"\n )\n return bool(spec[\"GPU\"])" }, { "identifier": "get_default_session", "path": "pai/session.py", "snippet": "def get_default_session() -> \"Session\":\n \"\"\"Get the default session used by the program.\n\n If the global default session is set, the function will try to initialize\n a session from config file.\n\n Returns:\n :class:`pai.session.Session`: The default session.\n\n \"\"\"\n global _default_session\n if not _default_session:\n config = load_default_config_file()\n if not config:\n return\n _default_session = Session(**config)\n return _default_session" } ]
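The context entries above all share a flat identifier/path/snippet layout. As a quick illustration only (this helper is not part of the dataset or the SDK; the function name and the abbreviated sample entries are hypothetical), such a list can be indexed by identifier so the cross-file snippet for a given symbol is easy to look up:

# Hypothetical helper: build an identifier -> entry lookup from a parsed `context` list.
from typing import Dict, List


def index_context(context: List[dict]) -> Dict[str, dict]:
    """Map each cross-file identifier to its {"identifier", "path", "snippet"} entry."""
    return {entry["identifier"]: entry for entry in context}


# Sample entries following the layout shown above (snippet bodies abbreviated).
sample_context = [
    {"identifier": "OssUriObj", "path": "pai/common/oss_utils.py", "snippet": "class OssUriObj(object): ..."},
    {"identifier": "upload", "path": "pai/common/oss_utils.py", "snippet": "def upload(...): ..."},
]
lookup = index_context(sample_context)
print(lookup["upload"]["path"])  # -> pai/common/oss_utils.py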
import copy import distutils.dir_util import json import logging import os.path import posixpath import shlex import shutil import tempfile import textwrap import time import requests from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from addict import Dict as AttrDict from oss2 import ObjectIterator from .common import git_utils from .common.consts import INSTANCE_TYPE_LOCAL_GPU, ModelFormat from .common.docker_utils import ContainerRun, run_container from .common.oss_utils import OssUriObj, download, is_oss_uri, upload from .common.utils import ( generate_repr, is_local_run_instance_type, random_str, to_plain_text, ) from .exception import DuplicatedMountException, MountPathIsOccupiedException from .image import ImageInfo from .predictor import AsyncPredictor, LocalPredictor, Predictor, ServiceType from .serializers import SerializerBase from .session import Session, get_default_session from .estimator import AlgorithmEstimator
17,509
# if source is a local path, upload it to OSS bucket and use OSS URI # as storage source. oss_path = session.get_storage_path_by_category("model_data") oss_uri = upload( source_path=source, oss_path=oss_path, bucket=session.oss_bucket ) oss_uri_obj = OssUriObj(oss_uri) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } else: raise ValueError( "Source path is not a valid OSS URI or a existing local path." ) # check if the source OSS Path is already mounted to the container. if oss_uri_obj.get_dir_uri() in uris: raise DuplicatedMountException( f"Source OSS path '{oss_uri_obj.get_dir_uri()}' is already " f"mounted to the container." ) configs.append(storage_config) self.storage = configs return storage_config def container_serving_spec( command: str, image_uri: Union[str, ImageInfo], source_dir: Optional[str] = None, git_config: Optional[Dict[str, Any]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, session: Optional[Session] = None, ) -> InferenceSpec: """A convenient function to create an InferenceSpec instance that serving the model with given container and script. Examples:: infer_spec: InferenceSpec = container_serving_spec( command="python run.py", source_dir="./model_server/", image_uri="<ServingImageUri>", ) m = Model( model_data="oss://<YourOssBucket>/path/to/your/model", inference_spec=infer_spec, ) m.deploy( instance_type="ecs.c6.xlarge" ) Args: command (str): The command used to launch the Model server. source_dir (str): A relative path or an absolute path to the source code directory used to load model and launch the HTTP server, it will be uploaded to the OSS bucket and mounted to the container. If there is a ``requirements.txt`` file under the directory, it will be installed before the prediction server started. If 'git_config' is provided, 'source_dir' should be a relative location to a directory in the Git repo. With the following GitHub repo directory structure: .. code:: |----- README.md |----- src |----- train.py |----- test.py if you need 'src' directory as the source code directory, you can assign source_dir='./src/'. git_config (Dict[str, str]): Git configuration used to clone the repo. Including ``repo``, ``branch``, ``commit``, ``username``, ``password`` and ``token``. The ``repo`` is required. All other fields are optional. ``repo`` specifies the Git repository. If you don't provide ``branch``, the default value 'master' is used. If you don't provide ``commit``, the latest commit in the specified branch is used. ``username``, ``password`` and ``token`` are for authentication purpose. For example, the following config: .. code:: python git_config = { 'repo': 'https://github.com/modelscope/modelscope.git', 'branch': 'master', 'commit': '9bfc4a9d83c4beaf8378d0a186261ffc1cd9f960' } results in cloning the repo specified in 'repo', then checking out the 'master' branch, and checking out the specified commit. image_uri (str): The Docker image used to run the prediction service. port (int): Expose port of the server in container, the prediction request will be forward to the port. The environment variable ``LISTENING_PORT`` in the container will be set to this value. Default to 8000. environment_variables (Dict[str, str], optional): Dictionary of environment variable key-value pairs to set on the running container. 
requirements (List[str], optional): A list of Python package dependency, it will be installed before the serving container run. requirements_path (str, optional): A absolute path to the requirements.txt in the container. health_check (Dict[str, Any], optional): The health check configuration. If it not set, A TCP readiness probe will be used to check the health of the HTTP server. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: :class:`pai.model.InferenceSpec`: An InferenceSpec instance. """ session = session or get_default_session() if git_config:
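The docstring in the cropped code above explains how `source_dir`, `git_config` and the container port fit together. The following is a minimal usage sketch based only on that docstring: the repository URL, serving image URI, OSS path and instance type are placeholders, and the call is assumed to run with a configured default PAI session.

from pai.model import Model, container_serving_spec

# git_config follows the structure documented above: 'repo' is required,
# 'branch' and 'commit' are optional. The URL here is a placeholder.
git_config = {
    "repo": "https://github.com/example/your-model-server.git",
    "branch": "master",
}

# 'source_dir' is a path relative to the cloned repo, per the docstring above.
infer_spec = container_serving_spec(
    command="python run.py",
    image_uri="<ServingImageUri>",  # placeholder serving image
    source_dir="./src/",
    git_config=git_config,
    port=8000,  # the default listen port documented above
)

m = Model(
    model_data="oss://<YourOssBucket>/path/to/your/model",  # placeholder OSS path
    inference_spec=infer_spec,
)
m.deploy(instance_type="ecs.c6.xlarge")  # placeholder instance type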
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) # Reserved ports for internal use, do not use them for service _RESERVED_PORTS = [8080, 9090] class DefaultServiceConfig(object): """Default configuration used in creating prediction service.""" # Listen Port listen_port = 8000 # Default model path in container model_path = "/eas/workspace/model/" # Default user code path in container code_path = "/ml/usercode/" class ResourceConfig(object): """A class that represents the resource used by a PAI prediction service instance.""" def __init__(self, cpu: int, memory: int, gpu: int = None, gpu_memory: int = None): """ResourceConfig initializer. The public resource group does not support requesting GPU resources with `ResourceConfig`. Use the 'gpu' and 'gpu_memory' parameter only for services deployed to dedicated resource groups that provide GPU machine instances. Args: cpu (int): The number of CPUs that each instance requires. memory (int): The amount of memory that each instance requires, must be an integer, Unit: MB. gpu (int): The number of GPUs that each instance requires. gpu_memory (int): The amount of GPU memory that each instance requires. The value must be an integer, Unit: GB. PAI allows memory resources of a GPU to be allocated to multiple instances. If you want multiple instances to share the memory resources of a GPU, set the gpu parameter to 0. If you set the ``gpu`` parameter to 1, each instance occupies a GPU and the gpu_memory parameter does not take effect. .. note:: **Important** PAI does not enable the strict isolation of GPU memory. To prevent out of memory (OOM) errors, make sure that the GPU memory used by each instance does not exceed the requested amount. """ self.cpu = cpu self.memory = memory self.gpu = gpu self.gpu_memory = gpu_memory def __repr__(self): return ( f"ResourceConfig(cpu={self.cpu}, memory={self.memory}MB, gpu={self.gpu or 0}," f" gpu_memory={self.gpu_memory or 0}GB)" ) def __str__(self): return self.__repr__() def to_dict(self): """Transform the ResourceConfig instance to a dictionary. Returns: dict: """ res = { "cpu": self.cpu, "gpu": self.gpu, "gpu_memory": self.gpu_memory, "memory": self.memory, } return {k: v for k, v in res.items() if v is not None} class InferenceSpec(object): """A class used to describe how to create a prediction service. InferenceSpec is using to describe how the model is serving in PAI. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. Example of how to config a InferneceSpec:: >>> # build an inference_spec that using XGBoost processor. 
>>> infer_spec = InferenceSpec(processor="xgboost") >>> infer_spec.metadata.rpc.keepalive = 1000 >>> infer_spec.warm_up_data_path = "oss://bucket-name/path/to/warmup-data" >>> infer_spec.add_option("metadata.rpc.max_batch_size", 8) >>> print(infer_spec.processor) xgboost >>> print(infer_spec.metadata.rpc.keepalive) 1000 >>> print(infer_spec.metadata.rpc.max_batch_size) 8 >>> print(infer_spec.to_dict()) {'processor': 'xgboost', 'metadata': {'rpc': {'keepalive': 1000, 'max_batch_size': 8}}, 'warm_up_data_path': 'oss://bucket-name/path/to/warmup-data'} """ def __init__(self, *args, **kwargs): """InferenceSpec initializer. Args: **kwargs: Parameters of the inference spec. """ properties = kwargs.pop("__properties", []) cfg_dict = copy.deepcopy(kwargs) cfg_dict = {k: v for k, v in cfg_dict.items() if not k.startswith("_")} if args: if len(args) > 1: raise TypeError() cfg_dict.update(args[0]) super(InferenceSpec, self).__setattr__( "_cfg_dict", self._transform_value(cfg_dict) ) super(InferenceSpec, self).__setattr__("__properties", properties) def __repr__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) def _transform_value(self, value): if isinstance(value, (List, Tuple)): return [self._transform_value(item) for item in value] elif isinstance(value, (Dict, AttrDict)): return AttrDict( {key: self._transform_value(value) for key, value in value.items()} ) return value def __missing__(self, name): return self._cfg_dict.__missing__(name) def __setitem__(self, name, value): return self._cfg_dict.__setitem__(name, self._transform_value(value)) def __setattr__(self, name, value): if name in getattr(self, "__properties"): super(InferenceSpec, self).__setattr__(name, self._transform_value(value)) else: self._cfg_dict.__setattr__(name, self._transform_value(value)) def __getattr__(self, item): if item.startswith("_"): return getattr(self, item) return self._cfg_dict.__getitem__(item) def __contains__(self, item): return item in self._cfg_dict def to_dict(self) -> Dict: """Return a dictionary that represent the InferenceSpec.""" return self._cfg_dict.to_dict() def add_option(self, name: str, value): """Add an option to the inference_spec instance. Args: name (str): Name of the option to set, represented as the JSON path of the parameter for the InferenceSpec. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. value: Value for the option. Examples: >>> infer_spec = InferenceSpec(processor="tensorflow_gpu_1.12") >>> infer_spec.add_option("metadata.rpc.keepalive", 10000) >>> infer_spec.metadata.rpc.keepalive 10000 >>> infer_spec.to_dict() {'processor': 'tensorflow_gpu_1.12', 'metadata': {'rpc': {'keepalive': 10000}}} """ src = self._transform_value(value) for k in reversed(name.split(".")): src = {k: src} self._cfg_dict.update(AttrDict(src)) def merge_options(self, options: Dict[str, Any]): """Merge options from a dictionary.""" for key, value in options.items(): self.add_option(key, value) @classmethod def from_dict(cls, config: Dict[str, Any]) -> "InferenceSpec": """Initialize a InferenceSpec from a dictionary. You can use this method to initialize a InferenceSpec instance from a dictionary. Returns: :class:`pai.model.InferenceSpec`: A InferenceSpec instance. 
""" config = config or dict() return cls(**config) def is_container_serving(self): return "containers" in self._cfg_dict @classmethod def _upload_source_dir(cls, source_dir, session): """Upload source files to OSS bucket.""" if not os.path.exists(source_dir): raise ValueError(f"Input source code path does not exist: {source_dir}.") if not os.path.isdir(source_dir): raise ValueError( f"Input source code path should be a directory: {source_dir}." ) target_dir = session.get_storage_path_by_category(category="inference_src") # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, target_dir, session.oss_bucket, ) logger.debug("Uploaded source code to OSS: %s", uploaded_source_code) return uploaded_source_code def mount( self, source: str, mount_path: str, session: Session = None, ) -> Dict[str, Any]: """Mount a source storage to the running container. .. note:: If source is a local path, it will be uploaded to the OSS bucket and mounted. If source is a OSS path, it will be mounted directly. Args: source (str): The source storage to be attached, currently only support OSS path in OSS URI format and local path. mount_path (str): The mount path in the container. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: Dict[str, Any]: The storage config. Raises: DuplicateMountException: If the mount path is already used or source OSS path is mounted to the container. Examples:: # Mount a OSS storage path to the running container. >>> inference_spec.mount("oss://<YourOssBucket>/path/to/directory/model.json", ... "/ml/model/") # 'Mount' a local path to the running container. >>> inference_spec.mount("/path/to/your/data/", "/ml/model/") """ session = session or get_default_session() # TODO: supports more storages, such as NAS, PAI Dataset, PAI CodeSource, etc. if not isinstance(source, str): raise ValueError( "Parameter should be a string which represents an OSS storage path" " or a local file path." ) if "storage" in self._cfg_dict: configs = self._cfg_dict.get("storage", []) else: configs = [] uris = set() for conf in configs: # check if target mount path is already used. if conf.get("mount_path") == mount_path: raise MountPathIsOccupiedException( f"The mount path '{mount_path}' has already been used." ) mount_uri = conf.get("oss", {}).get("path") uris.add(mount_uri) if is_oss_uri(source): oss_uri_obj = OssUriObj(source) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } elif os.path.exists(source): # if source is a local path, upload it to OSS bucket and use OSS URI # as storage source. oss_path = session.get_storage_path_by_category("model_data") oss_uri = upload( source_path=source, oss_path=oss_path, bucket=session.oss_bucket ) oss_uri_obj = OssUriObj(oss_uri) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } else: raise ValueError( "Source path is not a valid OSS URI or a existing local path." ) # check if the source OSS Path is already mounted to the container. if oss_uri_obj.get_dir_uri() in uris: raise DuplicatedMountException( f"Source OSS path '{oss_uri_obj.get_dir_uri()}' is already " f"mounted to the container." 
) configs.append(storage_config) self.storage = configs return storage_config def container_serving_spec( command: str, image_uri: Union[str, ImageInfo], source_dir: Optional[str] = None, git_config: Optional[Dict[str, Any]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, session: Optional[Session] = None, ) -> InferenceSpec: """A convenient function to create an InferenceSpec instance that serving the model with given container and script. Examples:: infer_spec: InferenceSpec = container_serving_spec( command="python run.py", source_dir="./model_server/", image_uri="<ServingImageUri>", ) m = Model( model_data="oss://<YourOssBucket>/path/to/your/model", inference_spec=infer_spec, ) m.deploy( instance_type="ecs.c6.xlarge" ) Args: command (str): The command used to launch the Model server. source_dir (str): A relative path or an absolute path to the source code directory used to load model and launch the HTTP server, it will be uploaded to the OSS bucket and mounted to the container. If there is a ``requirements.txt`` file under the directory, it will be installed before the prediction server started. If 'git_config' is provided, 'source_dir' should be a relative location to a directory in the Git repo. With the following GitHub repo directory structure: .. code:: |----- README.md |----- src |----- train.py |----- test.py if you need 'src' directory as the source code directory, you can assign source_dir='./src/'. git_config (Dict[str, str]): Git configuration used to clone the repo. Including ``repo``, ``branch``, ``commit``, ``username``, ``password`` and ``token``. The ``repo`` is required. All other fields are optional. ``repo`` specifies the Git repository. If you don't provide ``branch``, the default value 'master' is used. If you don't provide ``commit``, the latest commit in the specified branch is used. ``username``, ``password`` and ``token`` are for authentication purpose. For example, the following config: .. code:: python git_config = { 'repo': 'https://github.com/modelscope/modelscope.git', 'branch': 'master', 'commit': '9bfc4a9d83c4beaf8378d0a186261ffc1cd9f960' } results in cloning the repo specified in 'repo', then checking out the 'master' branch, and checking out the specified commit. image_uri (str): The Docker image used to run the prediction service. port (int): Expose port of the server in container, the prediction request will be forward to the port. The environment variable ``LISTENING_PORT`` in the container will be set to this value. Default to 8000. environment_variables (Dict[str, str], optional): Dictionary of environment variable key-value pairs to set on the running container. requirements (List[str], optional): A list of Python package dependency, it will be installed before the serving container run. requirements_path (str, optional): A absolute path to the requirements.txt in the container. health_check (Dict[str, Any], optional): The health check configuration. If it not set, A TCP readiness probe will be used to check the health of the HTTP server. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: :class:`pai.model.InferenceSpec`: An InferenceSpec instance. """ session = session or get_default_session() if git_config:
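The docstrings above describe the processor-based InferenceSpec, the ResourceConfig resource request, and the add_option/mount helpers. A minimal sketch combining them is shown below; it assumes a default PAI session and OSS bucket are already configured, the bucket name and resource sizes are illustrative placeholders, and the import path for ResourceConfig is inferred from the surrounding module rather than stated in the record.

# Sketch only: combines the InferenceSpec / ResourceConfig usage documented above.
# The OSS bucket name and resource sizes are made-up placeholders, and a default
# PAI session is assumed to be configured (mount() falls back to get_default_session()).
from pai.model import InferenceSpec, ResourceConfig  # ResourceConfig path assumed

# Processor-based spec, as in the XGBoost example from the InferenceSpec docstring.
infer_spec = InferenceSpec(processor="xgboost")
infer_spec.add_option("metadata.rpc.keepalive", 1000)
infer_spec.add_option("metadata.rpc.max_batch_size", 8)

# Mount model data into the serving container; an OSS URI is used directly,
# while a local directory would first be uploaded to the session's OSS bucket.
infer_spec.mount("oss://example-bucket/path/to/model/", "/eas/workspace/model/")

# Resource request per service instance: 2 vCPUs and 4000 MB of memory.
resource = ResourceConfig(cpu=2, memory=4000)

print(infer_spec.to_dict())
print(resource.to_dict())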
updated_args = git_utils.git_clone_repo(
0
2023-12-01 01:40:12+00:00
24k
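The same record documents container-based serving through container_serving_spec, including the git_config options that the completion target (git_utils.git_clone_repo) acts on. A hedged sketch of the documented call pattern follows; the image URI, repository details, package list, and instance type are placeholders, and the pai.model import path for Model and container_serving_spec is assumed from the surrounding file.

# Sketch of the container-based serving flow described in the record above.
# Concrete values (image URI, repo, requirements, instance type) are illustrative only.
from pai.model import Model, container_serving_spec  # import path assumed

infer_spec = container_serving_spec(
    command="python run.py",                 # command that launches the HTTP server
    image_uri="<ServingImageUri>",           # serving image, as in the docstring example
    git_config={                             # 'repo' is required; branch/commit optional
        "repo": "https://github.com/modelscope/modelscope.git",
        "branch": "master",
    },
    source_dir="./src/",                     # relative path inside the cloned repo
    port=8000,                               # container port; LISTENING_PORT is set to it
    requirements=["flask", "numpy"],         # hypothetical extra dependencies
)

m = Model(
    model_data="oss://<YourOssBucket>/path/to/your/model",
    inference_spec=infer_spec,
)
m.deploy(instance_type="ecs.c6.xlarge")      # mirrors the docstring's deploy example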
mpenning/ciscoconfparse2
tests/conftest.py
[ { "identifier": "CiscoConfParse", "path": "ciscoconfparse2/ciscoconfparse2.py", "snippet": "class CiscoConfParse(object):\n \"\"\"Parse Cisco IOS configurations and answer queries about the configs.\"\"\"\n config: Optional[Union[str,List[str]]] = None\n syntax: str = \"ios\"\n encoding: str = locale.getpreferredencoding()\n loguru: bool = True\n comment_delimiters: List[str] = []\n auto_indent_width: int = -1\n linesplit_rgx: str = r\"\\r*\\n\"\n ignore_blank_lines: bool = False\n auto_commit: bool = None\n factory: bool = False\n debug: int = 0\n\n # Attributes\n config_objs: Any = None\n finished_config_parse: bool = False\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def __init__(\n self,\n config: Optional[Union[str,List[str],tuple[str, ...]]]=None,\n syntax: str=\"ios\",\n encoding: str=locale.getpreferredencoding(),\n loguru: bool=True,\n comment_delimiters: List[str]=None,\n auto_indent_width: int=-1,\n linesplit_rgx: str=r\"\\r*\\n\",\n ignore_blank_lines: bool=False,\n auto_commit: bool=True,\n factory: bool=False,\n debug: int=0,\n ):\n \"\"\"\n Initialize CiscoConfParse.\n\n .. note::\n\n ``comment_delimiters`` always assumes the delimiter is one character wide.\n\n .. note::\n\n ``ignore_blank_lines`` changes the original ciscoconfparse default value.\n\n\n :param config: A list of configuration lines or the filepath to the configuration.\n :type config: Union[str,List[str],tuple[str, ...]]\n :param syntax: The configuration type, default to 'ios'; it must be one of: 'ios', 'nxos', 'iosxr', 'asa', 'junos'. Use 'junos' for any brace-delimited network configuration (including F5, Palo Alto, etc...).\n :type syntax: str\n :param encoding: The configuration encoding, default to ``locale.getpreferredencoding()``.\n :type encoding: str\n :param loguru: Control whether CiscoConfParse should enable ``loguru``, default to True.\n :type loguru: bool\n :param comment_delimiters: String comment delimiters. This should only be changed when parsing non-Cisco configurations, which do not use a '!' as the comment delimiter. ``comment`` defaults to '!'. 
This value can hold multiple characters in case the config uses multiple characters for comment delimiters.\n :type comment_delimiters: List[str]\n :param auto_indent_width: Defaults to -1, and should be kept that way unless you're working on a very tricky config parsing problem.\n :type auto_indent_width: int\n :param linesplit_rgx: Used when parsing configuration files to find\n where new configuration lines are; it is best\n to leave this as the default, unless you're\n working on a system that uses unusual line\n terminations (for instance something besides\n Unix, OSX, or Windows).\n :type linesplit_rgx: str\n :param ignore_blank_lines: Defaults to False; when this is set True,\n ciscoconfparse2 ignores blank configuration\n lines.\n :type ignore_blank_lines: bool\n :param auto_commit: Control whether CiscoConfParse should auto-commit config changes when possible, default to True.\n However, parsing very large configs may be faster with ``auto_commit=False``.\n :type auto_commit: bool\n :param factory: Control whether CiscoConfParse should enable the\n beta-quality configuration parameter parser,\n default to False.\n :type factory: bool\n :param debug: Control CiscoConfParse debug output, default is 0.\n :type debug: int\n :return: A CiscoConfParse object\n :rtype: :py:class:`~ciscoconfparse2.CiscoConfParse`\n\n This example illustrates how to parse a simple Cisco IOS configuration\n with :class:`~ciscoconfparse2.CiscoConfParse` into a variable called\n ``parse``. This example also illustrates what the ``config_objs``\n and ``ioscfg`` attributes contain.\n\n .. code-block:: python\n :emphasize-lines: 6\n\n >>> from ciscoconfparse2 import CiscoConfParse\n >>> config = [\n ... 'logging trap debugging',\n ... 'logging 172.28.26.15',\n ... ]\n >>> parse = CiscoConfParse(config=config)\n >>> parse\n <CiscoConfParse: 2 lines / syntax: ios / comment delimiter: '!' / factory: False>\n >>> parse.config_objs\n <ConfigList, comment='!', conf=[<IOSCfgLine # 0 'logging trap debugging'>, <IOSCfgLine # 1 'logging 172.28.26.15'>]>\n >>> parse.text\n ['logging trap debugging', 'logging 172.28.26.15']\n >>>\n\n Attributes\n ----------\n comment_delimiters : list\n A list of strings containing the comment-delimiters. Default: [\"!\"]\n objs : :class:`ConfigList`\n An alias for ``config_objs``\n config_objs : :class:`ConfigList`\n A custom list, which contains all parsed :class:`~models_cisco.IOSCfgLine` instances.\n debug : int\n An int to enable verbose config parsing debugs. Default 0.\n ioscfg : list\n A list of text configuration strings\n openargs : dict\n Returns a dictionary of valid arguments for `open()` (these change based on the running python version).\n syntax : str\n A string holding the configuration type. Default: 'ios'. Must be one of: 'ios', 'nxos', 'iosxr', 'asa', 'junos'. 
Use 'junos' for any brace-delimited network configuration (including F5, Palo Alto, etc...).\n\n\n \"\"\"\n\n if syntax not in ALL_VALID_SYNTAX:\n error = f\"{syntax} is not a valid syntax.\"\n logger.error(error)\n raise InvalidParameters(error)\n\n if comment_delimiters is None:\n comment_delimiters = get_comment_delimiters(syntax=syntax)\n elif isinstance(comment_delimiters, list):\n for comment_delimiter in comment_delimiters:\n if not isinstance(comment_delimiter, str):\n error = f\"`{comment_delimiter}` is not a valid string comment_delimiters\"\n logger.critical(error)\n raise InvalidParameters(error)\n elif not len(comment_delimiter) == 1:\n error = f\"`{comment_delimiter}` must be a single string character.\"\n logger.critical(error)\n raise InvalidParameters(error)\n elif not isinstance(comment_delimiters, list):\n error = \"'comment_delimiters' must be a list of string comment delimiters\"\n logger.critical(error)\n raise InvalidParameters(error)\n\n if int(auto_indent_width) <= 0:\n auto_indent_width = int(self.get_auto_indent_from_syntax(syntax=syntax))\n\n ######################################################################\n # Log an error if parsing with `ignore_blank_lines=True` and\n # `factory=False`\n ######################################################################\n if ignore_blank_lines is True and factory is True:\n error = \"ignore_blank_lines and factory are not supported together.\"\n logger.error(error)\n raise NotImplementedError(error)\n\n ######################################################################\n # Reconfigure loguru if read_only is True\n ######################################################################\n if loguru is False:\n active_loguru_handlers = configure_loguru(read_only=loguru, active_handlers=globals()[\"ACTIVE_LOGURU_HANDLERS\"], debug=debug)\n globals()[\"ACTIVE_LOGURU_HANDLERS\"] = active_loguru_handlers\n if debug > 0:\n logger.warning(f\"Disabled loguru enqueue because loguru={loguru}\")\n\n if not (isinstance(syntax, str) and (syntax in ALL_VALID_SYNTAX)):\n error = f\"'{syntax}' is an unknown syntax\"\n logger.error(error)\n raise ValueError(error)\n\n # all IOSCfgLine object instances...\n self.finished_config_parse = False\n\n self.syntax = syntax\n self.encoding = encoding or ENCODING\n self.loguru = bool(loguru)\n self.comment_delimiters = comment_delimiters\n self.auto_indent_width = int(auto_indent_width)\n self.debug = int(debug)\n self.factory = bool(factory)\n self.linesplit_rgx = linesplit_rgx\n self.ignore_blank_lines = ignore_blank_lines\n self.auto_commit = auto_commit\n\n self.config_objs = None\n\n\n # Convert an None config into an empty list\n if config is None:\n config = []\n\n if len(config) > 0:\n try:\n correct_element_types = []\n for ii in config:\n # Check whether the elements are the correct types...\n if isinstance(ii, (str, BaseCfgLine)):\n correct_element_types.append(True)\n else:\n correct_element_types.append(False)\n\n elements_have_len = all(correct_element_types)\n except AttributeError:\n elements_have_len = False\n except TypeError:\n elements_have_len = False\n else:\n elements_have_len = None\n\n if elements_have_len is False:\n error = \"All ConfigList elements must have a length()\"\n logger.error(error)\n raise InvalidParameters(error)\n\n # Read the configuration lines and detect invalid inputs...\n # tmp_lines = self._get_ccp_lines(config=config, logger=logger)\n if isinstance(config, (str, pathlib.Path,)):\n tmp_lines = self.read_config_file(filepath=config, 
linesplit_rgx=r\"\\r*\\n\")\n elif isinstance(config, Sequence):\n tmp_lines = config\n else:\n error = f\"Cannot read config from {config}\"\n logger.critical(error)\n raise ValueError(error)\n\n # conditionally strip off junos-config braces and other syntax\n # parsing issues...\n config_lines = self.handle_ccp_brace_syntax(tmp_lines=tmp_lines, syntax=syntax)\n if self.check_ccp_input_good(config=config_lines, logger=logger) is False:\n error = f\"Cannot parse config=`{tmp_lines}`\"\n logger.critical(error)\n raise ValueError(error)\n\n if self.debug > 0:\n logger.info(\"assigning self.config_objs = ConfigList()\")\n\n self.config_objs = ConfigList(\n initlist=config_lines,\n comment_delimiters=comment_delimiters,\n debug=debug,\n factory=factory,\n ignore_blank_lines=ignore_blank_lines,\n syntax=syntax,\n ccp_ref=self,\n auto_commit=auto_commit,\n )\n\n ######################################################################\n # Set the commit checkpoint after the initial parse... this\n # avoids the need to manually call CiscoConfParse.commit()\n # after parsing\n ######################################################################\n self.commit()\n\n # IMPORTANT this MUST not be a lie :-)...\n self.finished_config_parse = True\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def handle_ccp_brace_syntax(self, tmp_lines: list=None, syntax: str=None) -> List[str]:\n \"\"\"Deal with brace-delimited syntax issues, such as conditionally discarding junos closing brace-lines.\n\n :param tmp_lines: Brace-delimited text configuration lines\n :type tmp_lines: List[str]\n :param syntax: Syntax of the configuration lines\n :type syntax: str\n :return: Configuration lines without braces\n :rtype: List[str]\n \"\"\"\n\n if syntax not in ALL_VALID_SYNTAX:\n error = f\"{syntax} parser factory is not yet enabled; use factory=False\"\n logger.critical(error)\n raise InvalidParameters(error)\n\n if not isinstance(tmp_lines, (list, tuple)):\n error = f\"handle_ccp_brace_syntax(tmp_lines={tmp_lines}) must not be None\"\n logger.error(error)\n raise InvalidParameters(error)\n\n ######################################################################\n # Explicitly handle all brace-parsing factory syntax here...\n ######################################################################\n if syntax == \"junos\":\n config_lines = convert_junos_to_ios(tmp_lines, comment_delimiters=[\"#\"])\n elif syntax in ALL_VALID_SYNTAX:\n config_lines = tmp_lines\n else:\n error = f\"handle_ccp_brace_syntax(syntax=`{syntax}`) is not yet supported\"\n logger.error(error)\n raise InvalidParameters(error)\n\n return config_lines\n\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def get_auto_indent_from_syntax(self, syntax: str=None) -> int:\n \"\"\"Return an auto indent for the 'syntax' string in question\n\n :param syntax: Syntax of the configuration lines\n :type syntax: str\n :return: Number of spaces for each indent level\n :rtype: int\n \"\"\"\n if not isinstance(syntax, str):\n error = \"The 'syntax' parameter must be a string\"\n logger.error(error)\n raise InvalidParameters(error)\n\n if syntax not in ALL_VALID_SYNTAX:\n error = f\"syntax='{syntax}' is not yet supported\"\n logger.error(error)\n raise InvalidParameters(error)\n\n indent_width = -1\n if syntax == \"ios\":\n indent_width = 1\n elif syntax == \"asa\":\n indent_width = 1\n elif syntax == \"iosxr\":\n indent_width = 1\n elif syntax == \"nxos\":\n indent_width = 2\n elif syntax == \"junos\":\n indent_width = 4\n 
else:\n error = \"Unexpected condition in get_auto_indent_from_syntax()\"\n logger.critical(error)\n raise NotImplementedError(error)\n\n return int(indent_width)\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def __repr__(self) -> str:\n \"\"\"Return a string that represents this CiscoConfParse object instance. The number of lines embedded in the string is calculated from the length of the config_objs attribute.\n\n :return: A representation of this object.\n :rtype: str\n \"\"\"\n if self.config_objs is None:\n num_lines = 0\n elif isinstance(self.config_objs, Sequence):\n num_lines = len(self.config_objs)\n return (\n \"<CiscoConfParse: %s lines / syntax: %s / comment delimiters: %s / auto_indent_width: %s / factory: %s / ignore_blank_lines: %s / encoding: '%s' / auto_commit: %s>\"\n % (\n num_lines,\n self.syntax,\n self.comment_delimiters,\n self.auto_indent_width,\n self.factory,\n self.ignore_blank_lines,\n self.encoding,\n self.auto_commit,\n )\n )\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def read_config_file(self, filepath: str=None, linesplit_rgx: str=r\"\\r*\\n\") -> List[str]:\n \"\"\"Read the config lines from the filepath. Return the list of text configuration commands or raise an error.\n\n :param filepath: Filepath to be read\n :type filepath: str\n :param linesplit_rgx: Regex to use for line splits\n :type filepath: str\n :return: The output configuration\n :rtype: List[str]\n \"\"\"\n\n if self.finished_config_parse is not False:\n raise RequirementFailure()\n\n valid_path_variable = False\n if filepath is None:\n error = \"Filepath: None is invalid\"\n logger.critical(error)\n raise FileNotFoundError(error)\n elif isinstance(filepath, (str, pathlib.Path,)):\n valid_path_variable = True\n\n if valid_path_variable and not os.path.exists(filepath):\n error = f\"Filepath: {filepath} does not exist\"\n logger.critical(error)\n raise FileNotFoundError(error)\n\n config_lines = None\n\n _encoding = self.openargs['encoding']\n if valid_path_variable is True and os.path.isfile(filepath) is True:\n # config string - assume a filename...\n if self.debug > 0:\n logger.debug(f\"reading config from the filepath named '{filepath}'\")\n\n elif valid_path_variable is True and os.path.isfile(filepath) is False:\n if self.debug > 0:\n logger.debug(f\"filepath not found - '{filepath}'\")\n try:\n _ = open(file=filepath, **self.openargs)\n except FileNotFoundError:\n error = f\"\"\"FATAL - Attempted to open(file='{filepath}', mode='r', encoding=\"{_encoding}\"); the filepath named:\"{filepath}\" does not exist.\"\"\"\n logger.critical(error)\n raise FileNotFoundError(error)\n\n except OSError:\n error = f\"\"\"FATAL - Attempted to open(file='{filepath}', mode='r', encoding=\"{_encoding}\"); OSError opening \"{filepath}\".\"\"\"\n logger.critical(error)\n raise OSError(error)\n\n except BaseException:\n logger.critical(f\"Cannot open {filepath}\")\n raise BaseException\n\n else:\n error = f'Unexpected condition processing filepath: {filepath}'\n logger.critical(error)\n raise ValueError(error)\n\n # Read the file from disk and return the list of config statements...\n try:\n with open(file=filepath, **self.openargs) as fh:\n text = fh.read()\n rgx = re.compile(linesplit_rgx)\n config_lines = rgx.split(text)\n return config_lines\n\n except OSError:\n error = f\"CiscoConfParse could not open() the filepath named '{filepath}'\"\n logger.critical(error)\n raise OSError(error)\n\n except BaseException as eee:\n error = f\"FATAL - {eee}\"\n 
logger.critical(error)\n raise eee\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def check_ccp_input_good(self, config: Union[List[str],tuple[str, ...]]=None, logger: logger=None) -> bool:\n \"\"\"\n :param config: Sequence of commands\n :type config: Union[List[str], tuple[str, ...]]\n :param logger: loguru.logger() reference\n :type logger: loguru._logger.Logger\n :return: Whether the config can be parsed\n :rtype: bool\n \"\"\"\n\n if self.finished_config_parse is not False:\n raise RequirementFailure()\n\n if isinstance(config, Sequence):\n # Here we assume that `config` is a list of text config lines...\n #\n # config list of text lines...\n if self.debug > 0:\n logger.debug(\n f\"parsing config stored in the config variable: `{config}`\"\n )\n return True\n\n else:\n return False\n\n @property\n @logger.catch(reraise=True)\n def openargs(self) -> Dict[str,Union[str,None]]:\n \"\"\"\n Fix Py3.5 deprecation of universal newlines\n\n .. note::\n\n Ref original ciscoconfparse Github issue #114; also see\n https://softwareengineering.stackexchange.com/q/298677/23144.\n\n :return: The proper encoding parameters\n :rtype: Dict[str,Union[str,None]]\n \"\"\"\n if sys.version_info >= (\n 3,\n 6,\n ):\n retval = {\"mode\": \"r\", \"newline\": None, \"encoding\": self.encoding}\n else:\n retval = {\"mode\": \"rU\", \"encoding\": self.encoding}\n return retval\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def get_text(self) -> List[str]:\n \"\"\"\n :return: All text configuration statements\n :rtype: List[str]\n\n .. warning::\n\n The original ciscoconfparse ``ioscfg`@property has been renamed to ``get_text()``.\n \"\"\"\n return [ii.text for ii in self.config_objs]\n\n # This method is on CiscoConfParse()\n @property\n @logger.catch(reraise=True)\n def objs(self) -> List[BaseCfgLine]:\n \"\"\"CiscoConfParse().objs is an alias for the CiscoConfParse().config_objs property.\n\n :returns: All configuration objects.\n :rtype: List[BaseCfgLine]\n \"\"\"\n if self.config_objs is None:\n error = (\n \"config_objs is set to None. config_objs should be a ConfigList() of configuration-line objects\"\n )\n logger.error(error)\n raise ValueError(error)\n return self.config_objs\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def commit(self) -> None:\n \"\"\"Use :py:func:`~ciscoconfparse2.CiscoConfParse.commit` to manually fix up ``config_objs`` relationships after modifying a parsed configuration. This method is slow; try to batch calls to :func:`~ciscoconfparse2.CiscoConfParse.commit()` if possible.\n\n :return: None\n :rtype: None\n\n .. warning::\n\n If you modify a configuration after parsing it with :class:`~ciscoconfparse2.CiscoConfParse`,\n you *must* call :py:meth:`~ciscoconfparse2.CiscoConfParse.commit` or\n :py:meth:`~ciscoconfparse2.CiscoConfParse.commit` before searching the configuration\n again with methods such as :func:`~ciscoconfparse2.CiscoConfParse.find_objects`. 
Failure\n to call :py:meth:`~ciscoconfparse2.CiscoConfParse.commit` or\n :py:meth:`~ciscoconfparse2.CiscoConfParse.commit` on config modifications could\n lead to unexpected search results.\n \"\"\"\n\n # perform a commit on the ConfigList()\n self.config_objs.commit()\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def _find_child_object_branches(\n self,\n parent_obj: BaseCfgLine,\n childspec: str,\n regex_flags: str,\n debug: int=0,\n ) -> list:\n \"\"\"\n :param parent_obj: The parent object to be searched\n :type parent_obj: BaseCfgLine\n :param childspec: Regex string to match against child objects\n :type childspec: str\n :param regex_flags: Regex flags to apply to the aforementioned match\n :type regex_flags: str\n :param debug: Debug level of the operation\n :type debug: int\n :return: Children matching ``childspec``\n :rtype: List[BaseCfgLine]\n \"\"\"\n # I'm not using parent_obj.re_search_children() because\n # re_search_children() doesn't return None for no match...\n\n if debug > 1:\n msg = f\"\"\"Calling _find_child_object_branches(\nparent_obj={parent_obj},\nchildspec='{childspec}',\nregex_flags='{regex_flags}',\ndebug={debug},\n)\"\"\"\n logger.info(msg)\n\n # Get the child objects from parent objects\n if parent_obj is None:\n children = self._find_line_OBJ(\n linespec=childspec,\n exactmatch=False,\n )\n else:\n children = parent_obj.children\n\n # Find all child objects which match childspec...\n segment_list = [\n cobj\n for cobj in children\n if re.search(childspec, cobj.text, regex_flags)\n ]\n # Return [None] if no children matched...\n if len(segment_list) == 0:\n segment_list = [None]\n\n if debug > 1:\n logger.info(f\" _find_child_object_branches() returns segment_list={segment_list}\")\n return segment_list\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def find_object_branches(\n self,\n branchspec: Union[tuple[str, ...],List[str]]=(),\n regex_flags: Union[re.RegexFlag,int]=0,\n regex_groups: bool=False,\n empty_branches: bool=False,\n reverse: bool=False,\n debug: int=0,\n ) -> List[List[BaseCfgLine]]:\n r\"\"\"Iterate over a tuple of regular expression strings in `branchspec` and return matching objects in a list of lists (consider it similar to a table of matching config objects). `branchspec` expects to start at some ancestor and walk through the nested object hierarchy (with no limit on depth).\n\n Previous CiscoConfParse() methods only handled a single parent regex and single child regex (such as :func:`~ciscoconfparse2.CiscoConfParse.find_objects`).\n\n Transcend past one-level of parent-child relationship parsing to include multiple nested 'branches' of a single family (i.e. parents, children, grand-children, great-grand-children, etc). The result of handling longer regex chains is that it flattens what would otherwise be nested loops in your scripts; this makes parsing heavily-nested configuratations like Juniper, Palo-Alto, and F5 much simpler. Of course, there are plenty of applications for \"flatter\" config formats like IOS.\n\n Return a list of lists (of object 'branches') which are nested to the same depth required in `branchspec`. However, unlike most other CiscoConfParse() methods, return an explicit `None` if there is no object match. Returning `None` allows a single search over configs that may not be uniformly nested in every branch.\n\n .. 
warning::\n\n The ``allow_none`` from original ciscoconfparse is removed and no longer a configuration option; it will always be regarded as True.\n\n :param branchspec: Regular expressions to be matched.\n :type branchspec: Union[tuple[str, ...],List[str]]\n :param regex_flags: Chained regular expression flags, such as `re.IGNORECASE|re.MULTILINE`\n :type regex_flags: Union[re.RegexFlags,int]\n :param regex_groups: Return a tuple of re.Match groups instead of the matching configuration objects, default is False.\n :type regex_groups: bool\n :param empty_branches: If True, return a list of None statements if there is no match; before version 1.9.49, this defaulted True.\n :type empty_branches: bool\n :param reverse: If True, reverse the return value order.\n :type reverse: bool\n :param debug: Set > 0 for debug messages\n :type debug: int\n :return: A list of lists of matching :class:`~ciscoconfparse2.IOSCfgLine` objects\n :rtype: List[List[BaseCfgLine]]\n\n\n .. code-block:: python\n :emphasize-lines: 30,31\n\n >>> from operator import attrgetter\n >>> from ciscoconfparse2 import CiscoConfParse\n >>> config = [\n ... 'ltm pool FOO {',\n ... ' members {',\n ... ' k8s-05.localdomain:8443 {',\n ... ' address 192.0.2.5',\n ... ' session monitor-enabled',\n ... ' state up',\n ... ' }',\n ... ' k8s-06.localdomain:8443 {',\n ... ' address 192.0.2.6',\n ... ' session monitor-enabled',\n ... ' state down',\n ... ' }',\n ... ' }',\n ... '}',\n ... 'ltm pool BAR {',\n ... ' members {',\n ... ' k8s-07.localdomain:8443 {',\n ... ' address 192.0.2.7',\n ... ' session monitor-enabled',\n ... ' state down',\n ... ' }',\n ... ' }',\n ... '}',\n ... ]\n >>> parse = CiscoConfParse(config=config, syntax='junos', comment='#')\n >>>\n >>> branchspec = (r'ltm\\spool', r'members', r'\\S+?:\\d+', r'state\\sup')\n >>> branches = parse.find_object_branches(branchspec=branchspec)\n >>>\n >>> # We found three branches\n >>> len(branches)\n 3\n >>> # Each branch must match the length of branchspec\n >>> len(branches[0])\n 4\n >>> # Print out one object 'branch'\n >>> branches[0]\n [<IOSCfgLine # 0 'ltm pool FOO'>, <IOSCfgLine # 1 ' members' (parent is # 0)>, <IOSCfgLine # 2 ' k8s-05.localdomain:8443' (parent is # 1)>, <IOSCfgLine # 5 ' state up' (parent is # 2)>]\n >>>\n >>> # Get the a list of text lines for this branch...\n >>> [ii.text for ii in branches[0]]\n ['ltm pool FOO', ' members', ' k8s-05.localdomain:8443', ' state up']\n >>>\n >>> # Get the config text of the root object of the branch...\n >>> branches[0][0].text\n 'ltm pool FOO'\n >>>\n >>> # Note: `None` in branches[1][-1] because of no regex match\n >>> branches[1]\n [<IOSCfgLine # 0 'ltm pool FOO'>, <IOSCfgLine # 1 ' members' (parent is # 0)>, <IOSCfgLine # 6 ' k8s-06.localdomain:8443' (parent is # 1)>, None]\n >>>\n >>> branches[2]\n [<IOSCfgLine # 10 'ltm pool BAR'>, <IOSCfgLine # 11 ' members' (parent is # 10)>, <IOSCfgLine # 12 ' k8s-07.localdomain:8443' (parent is # 11)>, None]\n \"\"\"\n if self.config_objs.search_safe is False:\n error = \"The configuration has changed since the last commit; a config search is not safe.\"\n logger.critical(error)\n raise NotImplementedError(error)\n\n if isinstance(branchspec, list):\n branchspec = tuple(branchspec)\n\n if isinstance(branchspec, tuple):\n if branchspec == ():\n error = \"find_object_branches(): branchspec must not be empty\"\n logger.error(error)\n raise ValueError(error)\n\n else:\n error = \"find_object_branches(): Please enclose the branchspec regular expressions in a Python tuple\"\n 
logger.error(error)\n raise ValueError(error)\n\n branches = []\n # iterate over the regular expressions in branchspec\n for idx, childspec in enumerate(branchspec):\n # FIXME: Insert debugging here...\n if idx == 0:\n # Get matching 'root' objects from the config\n next_kids = self._find_child_object_branches(\n parent_obj=None,\n childspec=childspec,\n regex_flags=regex_flags,\n debug=debug,\n )\n # Start growing branches from the segments we received...\n branches = [[kid] for kid in next_kids]\n\n else:\n new_branches = []\n for branch in branches:\n # Extend existing branches into the new_branches\n if branch[-1] is not None:\n # Find children to extend the family branch...\n next_kids = self._find_child_object_branches(\n parent_obj=branch[-1],\n childspec=childspec,\n regex_flags=regex_flags,\n debug=debug,\n )\n\n for kid in next_kids:\n # Fork off a new branch and add each matching kid...\n tmp = copy.copy(branch)\n tmp.append(kid)\n new_branches.append(tmp)\n else:\n branch.append(None)\n new_branches.append(branch)\n\n # Ensure we have the most recent branches...\n branches = new_branches\n\n branches = new_branches\n\n # If regex_groups is True, assign regexp matches to the return matrix.\n if regex_groups is True:\n return_matrix = []\n # branchspec = (r\"^interfaces\", r\"\\s+(\\S+)\", r\"\\s+(unit\\s+\\d+)\", r\"family\\s+(inet)\", r\"address\\s+(\\S+)\")\n # for idx_matrix, row in enumerate(self.find_object_branches(branchspec)):\n for _, row in enumerate(branches):\n if not isinstance(row, Sequence):\n raise RequirementFailure()\n\n # Before we check regex capture groups, allocate an \"empty return_row\"\n # of the correct length...\n return_row = [(None,)] * len(branchspec)\n\n # Populate the return_row below...\n # return_row will be appended to return_matrix...\n for idx, element in enumerate(row):\n if element is None:\n return_row[idx] = (None,)\n\n else:\n regex_result = re.search(branchspec[idx], element.text)\n if regex_result is not None:\n # Save all the regex capture groups in matched_capture...\n matched_capture = regex_result.groups()\n if len(matched_capture) == 0:\n # If the branchspec groups() matches are a\n # zero-length tuple, populate this return_row\n # with the whole element's text\n return_row[idx] = (element.text,)\n else:\n # In this case, we found regex capture groups\n return_row[idx] = matched_capture\n else:\n # No regex capture groups b/c of no regex match...\n return_row[idx] = (None,)\n\n return_matrix.append(return_row)\n\n branches = return_matrix\n\n # We could have lost or created an extra branch if these aren't the\n # same length\n retval = list()\n if bool(empty_branches) is False:\n for branch in branches:\n ###############################################################\n # discard the branch if it contains None (element that did\n # not match)\n ###############################################################\n if not all(branch):\n continue\n retval.append(branch)\n else:\n retval = branches\n\n if reverse:\n retval.reverse()\n\n return retval\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def find_objects(self, linespec, exactmatch=False, ignore_ws=False, escape_chars=False, reverse=False):\n \"\"\"Find all :class:`~models_cisco.IOSCfgLine` objects whose text matches ``linespec`` and return the\n :class:`~models_cisco.IOSCfgLine` objects in a python list.\n\n :param linespec: A string or python regular expression, which should be matched\n :type linespec: Union[str,re.Pattern,BaseCfgLine]\n :param 
exactmatch: When set True, this option requires ``linespec`` match the whole configuration line, instead of a\n portion of the configuration line, default to False.\n :type exactmatch: str\n :param ignore_ws: Controls whether whitespace is ignored, default to False.\n :type ignore_ws: bool\n :param reverse: Controls whether the order of the results is reversed, default to False.\n :type reverse: bool\n :return: Matching :class:`~ciscoconfparse2.IOSCfgLine` objects.\n :rtype: List[BaseCfgLine]\n\n This example illustrates the use of :func:`~ciscoconfparse2.CiscoConfParse.find_objects`\n\n .. code-block:: python\n\n >>> from ciscoconfparse2 import CiscoConfParse\n >>> config = [\n ... '!',\n ... 'interface Serial1/0',\n ... ' ip address 1.1.1.1 255.255.255.252',\n ... '!',\n ... 'interface Serial1/1',\n ... ' ip address 1.1.1.5 255.255.255.252',\n ... '!',\n ... ]\n >>> parse = CiscoConfParse(config=config)\n >>>\n >>> parse.find_objects(r'^interface')\n [<IOSCfgLine # 1 'interface Serial1/0'>, <IOSCfgLine # 4 'interface Serial1/1'>]\n >>>\n \"\"\"\n if isinstance(linespec, list):\n if len(linespec) == 1 and isinstance(linespec[0], (str, re.Pattern)):\n linespec = linespec[0]\n else:\n error = \"linespec list input must be exactly one string or compiled-regex long\"\n logger.critical(error)\n raise InvalidParameters(error)\n\n if escape_chars is True:\n ###################################################################\n # Escape regex to avoid embedded parenthesis problems\n ###################################################################\n linespec = re.escape(linespec)\n\n if self.config_objs.search_safe is False:\n error = \"The configuration has changed since the last commit; a config search is not safe.\"\n logger.critical(error)\n raise NotImplementedError(error)\n\n if self.debug > 0:\n logger.info(\n \"find_objects('%s', exactmatch=%s) was called\" % (linespec, exactmatch),\n )\n\n if ignore_ws:\n linespec = build_space_tolerant_regex(linespec, encoding=self.encoding)\n\n if isinstance(linespec, str) or isinstance(linespec, re.Pattern):\n retval = self._find_line_OBJ(linespec, exactmatch)\n elif isinstance(linespec, BaseCfgLine):\n retval = list()\n for obj in self.objs:\n if obj == linespec:\n retval.append(obj)\n else:\n error = f\"linespec must be a string, re.Pattern, or BaseCfgLine instance; we got {type(linespec)}.\"\n logger.critical(error)\n raise InvalidParameters(error)\n\n if bool(reverse):\n retval.reverse()\n return retval\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def find_parent_objects(\n self,\n parentspec,\n childspec=None,\n ignore_ws=False,\n recurse=True,\n escape_chars=False,\n reverse=False,\n ):\n \"\"\"Return a list of parent :class:`~models_cisco.IOSCfgLine` objects,\n which matched the ``parentspec`` and whose children match ``childspec``.\n Only the parent :class:`~models_cisco.IOSCfgLine` objects will be\n returned.\n\n :param parentspec: Text regular expression for the :class:`~models_cisco.IOSCfgLine` object to be matched; this must match the parent's line\n :type parentspec: Union[str,List[str],tuple[str, ...]]\n :param childspec: Text regular expression for the child's configuration line\n :type childspec: str\n :param ignore_ws: boolean that controls whether whitespace is ignored\n :type ignore_ws: bool\n :param recurse: Set True if you want to search all children (children, grand children, great grand children, etc...). 
This is considered True if parentspec is a list or tuple.\n :type recurse: bool\n :param escape_chars: Set True if you want to escape characters before searching\n :type escape_chars: bool\n :param reverse: Set True if you want to reverse the order of the results\n :type reverse: bool\n :return: A list of matching parent :py:class:`~models_cisco.IOSCfgLine` objects\n :rtype: List[BaseCfgLine]\n\n .. warning::\n\n Do not set ``childspec`` if searching with a tuple of strings or list of strings.\n\n This example uses :py:meth:`~ciscoconfparse2.find_parent_objects()` to\n find all ports that are members of access vlan 300 in following\n config...\n\n .. parsed-literal::\n\n !\n interface FastEthernet0/1\n switchport access vlan 532\n spanning-tree vlan 532 cost 3\n !\n interface FastEthernet0/2\n switchport access vlan 300\n spanning-tree portfast\n !\n interface FastEthernet0/3\n duplex full\n speed 100\n switchport access vlan 300\n spanning-tree portfast\n !\n\n The following interfaces should be returned:\n\n .. parsed-literal::\n\n interface FastEthernet0/2\n interface FastEthernet0/3\n\n We do this by quering `find_objects_w_child()`; we set our\n parent as `^interface` and set the child as `switchport access\n vlan 300`.\n\n .. code-block:: python\n :emphasize-lines: 19,20\n\n >>> from ciscoconfparse2 import CiscoConfParse\n >>> config = ['!',\n ... 'interface FastEthernet0/1',\n ... ' switchport access vlan 532',\n ... ' spanning-tree vlan 532 cost 3',\n ... '!',\n ... 'interface FastEthernet0/2',\n ... ' switchport access vlan 300',\n ... ' spanning-tree portfast',\n ... '!',\n ... 'interface FastEthernet0/3',\n ... ' duplex full',\n ... ' speed 100',\n ... ' switchport access vlan 300',\n ... ' spanning-tree portfast',\n ... '!',\n ... ]\n >>> p = CiscoConfParse(config=config)\n >>> p.find_parent_objects(['interface', 'vlan 300'])\n [<IOSCfgLine # 5 'interface FastEthernet0/2'>, <IOSCfgLine # 9 'interface FastEthernet0/3'>]\n >>>\n \"\"\"\n if self.config_objs.search_safe is False:\n error = \"The configuration has changed since the last commit; a config search is not safe.\"\n logger.critical(error)\n raise NotImplementedError(error)\n\n if escape_chars is True:\n ###################################################################\n # Escape regex to avoid embedded parenthesis problems\n ###################################################################\n parentspec = re.escape(parentspec)\n childspec = re.escape(childspec)\n\n if isinstance(parentspec, BaseCfgLine):\n parentspec = parentspec.text\n elif isinstance(parentspec, str):\n pass\n elif isinstance(parentspec, (list, tuple)):\n _result = set()\n _tmp = self.find_object_branches(\n parentspec,\n )\n for _obj_branch in _tmp:\n # add the parent of that object branch to the result set\n _result.add(_obj_branch[0])\n\n if len(_result) == 0:\n ######################################################\n # If any _parentspec fails to match, we will hit this\n # condition when that failure happens.\n ######################################################\n return []\n else:\n # Sort and return the de-duplicated results\n return sorted(_result)\n else:\n error = f\"Received unexpected `parentspec` {type(parentspec)}\"\n logger.error(error)\n raise InvalidParameters(error)\n\n #######################################################################\n # Handle the case where parentspec is not a list or tuple\n #######################################################################\n if isinstance(childspec, BaseCfgLine):\n parentspec = 
childspec.text\n\n if ignore_ws:\n parentspec = build_space_tolerant_regex(parentspec, encoding=self.encoding)\n childspec = build_space_tolerant_regex(childspec, encoding=self.encoding)\n\n # Set escape_chars False to avoid double-escaping characters\n return list(\n filter(\n lambda x: x.re_search_children(childspec, recurse=recurse),\n self.find_objects(parentspec, ignore_ws=ignore_ws, escape_chars=False, reverse=reverse),\n ),\n )\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def find_parent_objects_wo_child(self, parentspec, childspec=None, ignore_ws=False, recurse=False, escape_chars=False, reverse=False):\n r\"\"\"Return a list of parent :class:`~models_cisco.IOSCfgLine` objects, which matched the ``parentspec`` and whose children did not match ``childspec``. Only the parent :class:`~models_cisco.IOSCfgLine` objects will be returned. For simplicity, this method only finds oldest_ancestors without immediate children that match.\n\n Parameters\n ----------\n parentspec : str\n Text regular expression for the :class:`~models_cisco.IOSCfgLine` object to be matched; this must match the parent's line\n childspec : str\n Text regular expression for the line to be matched; this must match the child's line\n ignore_ws : bool\n boolean that controls whether whitespace is ignored\n recurse : bool\n boolean that controls whether to recurse through children of children\n escape_chars : bool\n boolean that controls whether to escape characters before searching\n reverse : bool\n Set True if you want to reverse the order of the results\n\n Returns\n -------\n list\n A list of matching parent configuration lines\n\n Examples\n --------\n This example finds all ports that are autonegotiating in the following config...\n\n .. parsed-literal::\n\n !\n interface FastEthernet0/1\n switchport access vlan 532\n spanning-tree vlan 532 cost 3\n !\n interface FastEthernet0/2\n switchport access vlan 300\n spanning-tree portfast\n !\n interface FastEthernet0/2\n duplex full\n speed 100\n switchport access vlan 300\n spanning-tree portfast\n !\n\n The following interfaces should be returned:\n\n .. parsed-literal::\n\n interface FastEthernet0/1\n interface FastEthernet0/2\n\n We do this by quering ``find_parent_objects_wo_child()``; we set our\n parent as ``^interface`` and set the child as ``speed\\s\\d+`` (a\n regular-expression which matches the word 'speed' followed by\n an integer).\n\n .. code-block:: python\n :emphasize-lines: 19\n\n >>> from ciscoconfparse2 import CiscoConfParse\n >>> config = ['!',\n ... 'interface FastEthernet0/1',\n ... ' switchport access vlan 532',\n ... ' spanning-tree vlan 532 cost 3',\n ... '!',\n ... 'interface FastEthernet0/2',\n ... ' switchport access vlan 300',\n ... ' spanning-tree portfast',\n ... '!',\n ... 'interface FastEthernet0/3',\n ... ' duplex full',\n ... ' speed 100',\n ... ' switchport access vlan 300',\n ... ' spanning-tree portfast',\n ... '!',\n ... 
]\n >>> p = CiscoConfParse(config=config)\n >>> p.find_parent_objects_wo_child(r'^interface', r'speed\\s\\d+')\n [<IOSCfgLine # 1 'interface FastEthernet0/1'>, <IOSCfgLine # 5 'interface FastEthernet0/2'>]\n >>>\n \"\"\"\n if isinstance(parentspec, list):\n if len(parentspec) == 2 and isinstance(parentspec[0], (str, re.Pattern)) and isinstance(parentspec[1], (str, re.Pattern)):\n parentspec = parentspec[0]\n childspec = parentspec[1]\n else:\n error = \"list input must be exactly two strings or compiled-regex long\"\n logger.critical(error)\n raise InvalidParameters(error)\n\n if not isinstance(childspec, (str, re.Pattern)):\n error = \"childspec input must be a string or compiled-regex\"\n logger.critical(error)\n raise InvalidParameters(error)\n\n if self.config_objs.search_safe is False:\n error = \"The configuration has changed since the last commit; a config search is not safe.\"\n logger.critical(error)\n raise NotImplementedError(error)\n\n if escape_chars is True:\n ###################################################################\n # Escape regex to avoid embedded parenthesis problems\n ###################################################################\n parentspec = re.escape(parentspec)\n childspec = re.escape(childspec)\n\n if isinstance(parentspec, BaseCfgLine):\n parentspec = parentspec.text\n elif isinstance(parentspec, (list, tuple)):\n ##################################################################\n # Catch unsupported parentspec type here\n ##################################################################\n error = f\"find_parent_objects_wo_child() `parentspec` does not support a {type(parentspec)}\"\n logger.error(error)\n raise InvalidParameters(error)\n if isinstance(childspec, BaseCfgLine):\n parentspec = childspec.text\n\n if ignore_ws is True:\n parentspec = build_space_tolerant_regex(parentspec, encoding=self.encoding)\n childspec = build_space_tolerant_regex(childspec, encoding=self.encoding)\n\n\n # Set escape_chars False to avoid double-escaping chars\n return [\n obj\n for obj in self.find_objects(parentspec, ignore_ws=ignore_ws, escape_chars=False, reverse=reverse)\n if not obj.re_search_children(childspec, recurse=recurse)\n ]\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def find_child_objects(\n self,\n parentspec,\n childspec=None,\n ignore_ws=False,\n recurse=True,\n escape_chars=False,\n reverse=False,\n ):\n r\"\"\"Parse through the children of all parents matching parentspec,\n and return a list of child objects, which matched the childspec.\n\n :param parentspec: Text regular expression for the parent's configuration line. A list is preferred.\n :type parentspec: Union[str, List[str], tuple[str, ...]]\n :param childspec: Text regular expression for the child's configuration line.\n :type parentspec: str\n :param ignore_ws: Ignore whitespace, default to False\n :type ignore_ws: bool\n :param recurse: Control whether to recurse in the config, default to True.\n :type recurse: bool\n :param escape_chars: Controls whether characters are escaped before searching, default to False.\n :type escape_chars: bool\n :param reverse: Controls whether results are reversed; set True if modifying the configuration with these results.\n :type reverse: bool\n :return: Matching child objects\n :rtype: List[BaseCfgLine]\n\n .. warning::\n\n Do not set ``childspec`` if searching with a tuple of strings or list of strings.\n\n This example finds the object for \"ge-0/0/0\" under \"interfaces\" in the\n following config...\n\n .. 
parsed-literal::\n\n interfaces\n ge-0/0/0\n unit 0\n family ethernet-switching\n port-mode access\n vlan\n members VLAN_FOO\n ge-0/0/1\n unit 0\n family ethernet-switching\n port-mode trunk\n vlan\n members all\n native-vlan-id 1\n vlan\n unit 0\n family inet\n address 172.16.15.5/22\n\n\n The following object should be returned:\n\n .. parsed-literal::\n\n <IOSCfgLine # 7 ' ge-0/0/1' (parent is # 0)>\n\n We do this by quering `find_child_objects()`; we set our\n parent as `^\\s*interface` and set the child as\n `^\\s+ge-0/0/1`.\n\n .. code-block:: python\n :emphasize-lines: 22,23\n\n >>> from ciscoconfparse2 import CiscoConfParse\n >>> config = ['interfaces',\n ... ' ge-0/0/0',\n ... ' unit 0',\n ... ' family ethernet-switching',\n ... ' port-mode access',\n ... ' vlan',\n ... ' members VLAN_FOO',\n ... ' ge-0/0/1',\n ... ' unit 0',\n ... ' family ethernet-switching',\n ... ' port-mode trunk',\n ... ' vlan',\n ... ' members all',\n ... ' native-vlan-id 1',\n ... ' vlan',\n ... ' unit 0',\n ... ' family inet',\n ... ' address 172.16.15.5/22',\n ... ]\n >>> p = CiscoConfParse(config=config)\n >>> p.find_child_objects(['interface', r'ge-0/0/1'])\n [<IOSCfgLine # 7 ' ge-0/0/1' (parent is # 0)>]\n >>>\n \"\"\"\n if self.config_objs.search_safe is False:\n error = \"The configuration has changed since the last commit; a config search is not safe.\"\n logger.critical(error)\n raise NotImplementedError(error)\n\n if escape_chars is True:\n ###################################################################\n # Escape regex to avoid embedded parenthesis problems\n ###################################################################\n parentspec = re.escape(parentspec)\n childspec = re.escape(childspec)\n\n if isinstance(parentspec, BaseCfgLine):\n parentspec = parentspec.text\n elif isinstance(parentspec, str):\n pass\n elif isinstance(parentspec, (list, tuple)):\n if len(parentspec) > 0:\n _result = set()\n _tmp = self.find_object_branches(\n parentspec,\n )\n for _obj_branch in _tmp:\n # add the child of that object branch to the result set\n _result.add(_obj_branch[-1])\n\n if len(_result) == 0:\n ######################################################\n # If any _childspec fails to match, we will hit this\n # condition when that failure happens.\n ######################################################\n return []\n # Sort the de-duplicated results\n return sorted(_result)\n else:\n error = f\"`parentspec` {type(parentspec)} must have at least one element.\"\n logger.error(error)\n raise InvalidParameters(error)\n else:\n error = f\"Received unexpected `parentspec` {type(parentspec)}\"\n logger.error(error)\n raise InvalidParameters(error)\n\n #######################################################################\n # Handle the case where parentspec is not a list or tuple\n #######################################################################\n if isinstance(childspec, BaseCfgLine):\n parentspec = childspec.text\n\n if ignore_ws:\n parentspec = build_space_tolerant_regex(parentspec, encoding=self.encoding)\n childspec = build_space_tolerant_regex(childspec, encoding=self.encoding)\n\n retval = set()\n # Set escape_chars False to avoid double-escaping characters\n parents = self.find_objects(parentspec, ignore_ws=ignore_ws, escape_chars=False, reverse=reverse)\n if recurse is False:\n for parent in parents:\n ##############################################################\n # If recurse is False, only search direct children\n ##############################################################\n for 
child in parent.children:\n if child.re_match(rf\"({childspec})\", default=False):\n retval.add(child)\n else:\n for parent in parents:\n ##############################################################\n # If recurse is True, search all children including children\n # of the children\n ##############################################################\n for child in parent.all_children:\n if child.re_match(rf\"({childspec})\", default=False):\n retval.add(child)\n\n return sorted(retval)\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def re_search_children(self, regexspec, recurse=False):\n \"\"\"Use ``regexspec`` to search for root parents in the config with text matching regex. If `recurse` is False, only root parent objects are returned. A list of matching objects is returned.\n\n This method is very similar to :func:`~ciscoconfparse2.CiscoConfParse.find_objects` (when `recurse` is True); however it was written in response to the use-case described in `Github Issue #156 <https://github.com/mpenning/ciscoconfparse/issues/156>`_.\n\n Parameters\n ----------\n regexspec : str\n A string or python regular expression, which should be matched.\n recurse : bool\n Set True if you want to search all objects, and not just the root parents\n\n Returns\n -------\n list\n A list of matching :class:`~models_cisco.IOSCfgLine` objects which matched. If there is no match, an empty :py:func:`list` is returned.\n\n \"\"\"\n ## I implemented this method in response to Github issue #156\n if recurse is False:\n # Only return the matching oldest ancestor objects...\n return [obj for obj in self.find_objects(regexspec) if (obj.parent is obj)]\n else:\n # Return any matching object\n return [obj for obj in self.find_objects(regexspec)]\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def re_match_iter_typed(\n self,\n regexspec,\n group=1,\n result_type=str,\n default=\"\",\n untyped_default=False,\n ):\n r\"\"\"Use ``regexspec`` to search the root parents in the config\n and return the contents of the regular expression group, at the\n integer ``group`` index, cast as ``result_type``; if there is no\n match, ``default`` is returned.\n\n Note\n ----\n Only the first regex match is returned.\n\n Parameters\n ----------\n regexspec : str\n A string or python compiled regular expression, which should be matched. This regular expression should contain parenthesis, which bound a match group.\n group : int\n An integer which specifies the desired regex group to be returned. ``group`` defaults to 1.\n result_type : type\n A type (typically one of: ``str``, ``int``, ``float``, or :class:`~ccp_util.IPv4Obj`). All returned values are cast as ``result_type``, which defaults to ``str``.\n default : any\n The default value to be returned, if there is no match. The default is an empty string.\n untyped_default : bool\n Set True if you don't want the default value to be typed\n\n Returns\n -------\n ``result_type``\n The text matched by the regular expression group; if there is no match, ``default`` is returned. All values are cast as ``result_type``. The default result_type is `str`.\n\n\n Examples\n --------\n This example illustrates how you can use\n :func:`~ciscoconfparse2.re_match_iter_typed` to get the\n first interface name listed in the config.\n\n >>> import re\n >>> from ciscoconfparse2 import CiscoConfParse\n >>> config = [\n ... '!',\n ... 'interface Serial1/0',\n ... ' ip address 1.1.1.1 255.255.255.252',\n ... '!',\n ... 'interface Serial2/0',\n ... 
' ip address 1.1.1.5 255.255.255.252',\n ... '!',\n ... ]\n >>> parse = CiscoConfParse(config=config)\n >>> parse.re_match_iter_typed(r'interface\\s(\\S+)')\n 'Serial1/0'\n >>>\n\n The following example retrieves the hostname from the configuration\n\n >>> from ciscoconfparse2 import CiscoConfParse\n >>> config = [\n ... '!',\n ... 'hostname DEN-EDGE-01',\n ... '!',\n ... 'interface Serial1/0',\n ... ' ip address 1.1.1.1 255.255.255.252',\n ... '!',\n ... 'interface Serial2/0',\n ... ' ip address 1.1.1.5 255.255.255.252',\n ... '!',\n ... ]\n >>> parse = CiscoConfParse(config=config)\n >>> parse.re_match_iter_typed(r'^hostname\\s+(\\S+)')\n 'DEN-EDGE-01'\n >>>\n\n \"\"\"\n ## iterate through root objects, and return the matching value\n ## (cast as result_type) from the first object.text that matches regex\n\n # if (default is True):\n ## Not using self.re_match_iter_typed(default=True), because I want\n ## to be sure I build the correct API for match=False\n ##\n ## Ref IOSIntfLine.has_dtp for an example of how to code around\n ## this while I build the API\n # raise NotImplementedError\n\n for cobj in self.config_objs:\n # Only process parent objects at the root of the tree...\n if cobj.parent is not cobj:\n continue\n\n mm = re.search(regexspec, cobj.text)\n if mm is not None:\n return result_type(mm.group(group))\n ## Ref Github issue #121\n if untyped_default:\n return default\n else:\n return result_type(default)\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def save_as(self, filepath):\n \"\"\"Save a text copy of the configuration at ``filepath``; this\n method uses the OperatingSystem's native line separators (such as\n ``\\\\r\\\\n`` in Windows).\"\"\"\n try:\n with open(filepath, \"w\", encoding=self.encoding) as newconf:\n for line in self.as_text:\n newconf.write(line + \"\\n\")\n return True\n except BaseException as ee:\n logger.error(str(ee))\n raise ee\n\n ### The methods below are marked SEMI-PRIVATE because they return an object\n ### or iterable of objects instead of the configuration text itself.\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def _find_line_OBJ(self, linespec, exactmatch=False):\n \"\"\"SEMI-PRIVATE: Find objects whose text matches the linespec\"\"\"\n\n if self.config_objs is None:\n err = \"config_objs is None. self.config_objs logic failed.\"\n raise ValueError(err)\n\n if self.debug >= 2:\n logger.debug(\n \"Looking for match of linespec='%s', exactmatch=%s\"\n % (linespec, exactmatch),\n )\n\n # NOTE TO SELF: do not remove _find_line_OBJ(); used by Cisco employees\n if not exactmatch:\n # Return objects whose text attribute matches linespec\n linespec_re = re.compile(linespec)\n elif exactmatch:\n # Return objects whose text attribute matches linespec exactly\n linespec_re = re.compile(\"^%s$\" % linespec)\n\n return list(\n filter(lambda obj: linespec_re.search(obj.text), self.config_objs),\n )\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def _find_sibling_OBJ(self, lineobject):\n \"\"\"SEMI-PRIVATE: Takes a singe object and returns a list of sibling\n objects\"\"\"\n siblings = lineobject.parent.children\n return siblings\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def _find_all_child_OBJ(self, lineobject):\n \"\"\"SEMI-PRIVATE: Takes a single object and returns a list of\n decendants in all 'children' / 'grandchildren' / etc... 
after it.\n It should NOT return the children of siblings\"\"\"\n # sort the list, and get unique objects\n retval = set(lineobject.children)\n for candidate in lineobject.children:\n if candidate.has_children:\n for child in candidate.children:\n retval.add(child)\n retval = sorted(retval)\n return retval\n\n # This method is on CiscoConfParse()\n @logger.catch(reraise=True)\n def _unique_OBJ(self, objectlist):\n \"\"\"SEMI-PRIVATE: Returns a list of unique objects (i.e. with no\n duplicates).\n The returned value is sorted by configuration line number\n (lowest first)\"\"\"\n retval = set()\n for obj in objectlist:\n retval.add(obj)\n return sorted(retval)" }, { "identifier": "PythonOptimizeCheck", "path": "ciscoconfparse2/ccp_util.py", "snippet": "class PythonOptimizeCheck(object):\n \"\"\"\n Check if we're running under \"python -O ...\". The -O option removes\n all `assert` statements at runtime. ciscoconfparse2 depends heavily on\n `assert` and running ciscoconfparse2 under python -O is a really bad idea.\n\n __debug__ is True unless run with `python -O ...`. __debug__ is False\n under `python -O ...`.\n\n Also throw an error if PYTHONOPTIMIZE is set in the windows or unix shell.\n\n This class should be run in <module_name_dir>/__init__.py.\n\n This condition is not unique to ciscoconfparse2.\n\n Simple usage (in __init__.py):\n ------------------------------\n\n # Handle PYTHONOPTIMIZE problems...\n from ciscoconfparse2.ccp_util import PythonOptimizeCheck\n _ = PythonOptimizeCheck()\n\n\n \"\"\"\n @logger.catch(reraise=True)\n def __init__(self):\n\n self.PYTHONOPTIMIZE_env_value = os.environ.get(\"PYTHONOPTIMIZE\", None)\n\n error = \"__no_error__\"\n try:\n # PYTHONOPTIMIZE is not supported... in the linux shell\n # disable it with `unset PYTHONOPTIMIZE`\n if isinstance(self.PYTHONOPTIMIZE_env_value, str) and self.PYTHONOPTIMIZE_env_value.strip() != \"\":\n # This condition explicitly allows PYTHONOPTIMIZE=\"\", which\n # is not a problem.\n error = \"Your environment has PYTHONOPTIMIZE set. ciscoconfparse2 doesn't support running under PYTHONOPTIMIZE.\"\n # PYTHONOPTIMIZE is not supported... in the linux shell\n # disable it with `unset PYTHONOPTIMIZE`\n elif self.PYTHONOPTIMIZE_env_value is not None:\n error = \"Your environment has PYTHONOPTIMIZE set. ciscoconfparse2 doesn't support running under PYTHONOPTIMIZE.\"\n # Throw an error if we're running under `python -O`. `python -O` is not supported\n # We should keep the __debug__ check for `-O` at the end, otherwise it\n # masks identifying problems with PYTHONOPTIMIZE set in the shell...\n elif __debug__ is False:\n # Running under 'python -O'\n error = \"You're using `python -O`. Please don't. ciscoconfparse2 doesn't support `python -O`\"\n\n else:\n # whew...\n pass\n\n except Exception as exception_info:\n print(\"exception_info\", str(exception_info))\n raise RuntimeError(\"Something bad happened in PYTHONOPTIMIZE checks. Please report this problem as a ciscoconfparse2 bug\")\n\n if error != \"__no_error__\":\n raise PythonOptimizeException(error)" } ]
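The context snippet above documents CiscoConfParse methods such as re_search_children() and save_as() without a runnable example of its own. A minimal sketch of how those two documented calls combine, assuming a made-up config list and output path (neither the config lines nor the path come from the dataset record):

from ciscoconfparse2 import CiscoConfParse

# Hypothetical config lines, purely for illustration
config = [
    "hostname LAB-SW01",
    "interface GigabitEthernet0/1",
    " switchport access vlan 10",
    "interface GigabitEthernet0/2",
    " switchport access vlan 20",
]
parse = CiscoConfParse(config=config)

# recurse=False returns only root parent objects whose text matches the regex
interfaces = parse.re_search_children(r"^interface", recurse=False)
print([obj.text for obj in interfaces])

# Write the parsed configuration back out as plain text (the path is an assumption)
parse.save_as("/tmp/lab-sw01.conf")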
import platform
import sys
import os
import dns.exception
import dns.resolver
import pytest
from ciscoconfparse2.ciscoconfparse2 import CiscoConfParse
from ciscoconfparse2.ccp_util import PythonOptimizeCheck
from fixtures.devices.mock_cisco import start_cisco_mock, stop_cisco_mock
18,528
tacacs-server host 10.0.0.32 tacacs-server host 10.0.0.33 aaa group server tacacs+ TACACS_GROUP server 10.0.0.32 server 10.0.0.33 use-vrf management source-interface mgmt0 aaa authentication login default group TACACS_GROUP aaa authentication login console group TACACS_GROUP aaa authorization commands default group TACACS_GROUP aaa accounting default group TACACS_GROUP aaa authentication login error-enable logging event link-status default vpc domain 999 role priority 100 system-priority 1 auto-recovery peer-keepalive destination 1.1.1.2 fex 115 desc FEX115 pinning max-links 1 interface loopback0 ip address 10.1.1.1/32 interface mgmt0 ip address 10.0.0.5/24 interface port-channel1 vpc peer-link switchport mode trunk spanning-tree port type network description [vPC PEER LINK] interface port-channel21 description Uplink to core switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,155 mtu 9216 vpc 21 interface port-channel115 switchport mode fex-fabric fex associate 115 interface Ethernet1/1 switchport mode trunk spanning-tree port type network channel-group 1 mode active interface Ethernet1/2 switchport mode trunk spanning-tree port type network channel-group 1 mode active interface Ethernet1/3 ip address 192.0.2.0/31 interface Ethernet1/4 switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,15 channel-group 21 mode active mtu 9216 interface Ethernet1/5 switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,15 channel-group 21 mode active mtu 9216 interface Ethernet1/6 switchport mode fex-fabric fex associate 115 channel-group 115 interface Ethernet1/7 switchport mode access switchport access vlan 100 mtu 9216 interface Ethernet1/8 switchport mode access switchport access vlan 102 mtu 9216 interface Ethernet1/9 ip address 10.1.2.6/30 mtu 9216 interface Ethernet1/10 encapsulation dot1Q 200 bandwidth 100000000 delay 200 beacon ip address 10.1.2.2/30 mpls ip mtu 9216 """.splitlines() @pytest.fixture(scope="session") def c01_default_gigethernets(request): yield config_c01_default_gige @pytest.fixture(scope="session") def c01_insert_serial_replace(request): yield config_c01_insert_serial_replace @pytest.fixture(scope="function") def parse_c01(request): """Preparsed c01"""
r""" conftest.py - Parse, Query, Build, and Modify IOS-style configs Copyright (C) 2023 David Michael Pennington at Cisco Systems This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. If you need to contact the author, you can do so by emailing: mike [~at~] pennington [/dot\] net """ sys.path.insert(0, "..") c01 = """policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! interface Serial 1/0 encapsulation ppp ip address 1.1.1.1 255.255.255.252 ! interface GigabitEthernet4/1 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/2 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/3 switchport switchport access vlan 100 switchport voice vlan 150 ! interface GigabitEthernet4/4 shutdown ! interface GigabitEthernet4/5 switchport switchport access vlan 110 ! interface GigabitEthernet4/6 switchport switchport access vlan 110 ! interface GigabitEthernet4/7 switchport switchport access vlan 110 ! interface GigabitEthernet4/8 switchport switchport access vlan 110 ! access-list 101 deny tcp any any eq 25 log access-list 101 permit ip any any ! ! logging 1.1.3.5 logging 1.1.3.17 ! banner login ^C This is a router, and you cannot have it. Log off now while you still can type. I break the fingers of all tresspassers. ^C alias exec showthang show ip route vrf THANG""".splitlines() config_c01_default_gige = """policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! interface Serial 1/0 encapsulation ppp ip address 1.1.1.1 255.255.255.252 ! default interface GigabitEthernet4/1 interface GigabitEthernet4/1 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! default interface GigabitEthernet4/2 interface GigabitEthernet4/2 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! default interface GigabitEthernet4/3 interface GigabitEthernet4/3 switchport switchport access vlan 100 switchport voice vlan 150 ! default interface GigabitEthernet4/4 interface GigabitEthernet4/4 shutdown ! default interface GigabitEthernet4/5 interface GigabitEthernet4/5 switchport switchport access vlan 110 ! default interface GigabitEthernet4/6 interface GigabitEthernet4/6 switchport switchport access vlan 110 ! default interface GigabitEthernet4/7 interface GigabitEthernet4/7 switchport switchport access vlan 110 ! default interface GigabitEthernet4/8 interface GigabitEthernet4/8 switchport switchport access vlan 110 ! access-list 101 deny tcp any any eq 25 log access-list 101 permit ip any any ! ! logging 1.1.3.5 logging 1.1.3.17 ! banner login ^C This is a router, and you cannot have it. Log off now while you still can type. I break the fingers of all tresspassers. 
^C alias exec showthang show ip route vrf THANG""".splitlines() config_c01_insert_serial_replace = """policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! default interface Serial 2/0 interface Serial 2/0 encapsulation ppp ip address 1.1.1.1 255.255.255.252 ! interface GigabitEthernet4/1 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/2 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/3 switchport switchport access vlan 100 switchport voice vlan 150 ! interface GigabitEthernet4/4 shutdown ! interface GigabitEthernet4/5 switchport switchport access vlan 110 ! interface GigabitEthernet4/6 switchport switchport access vlan 110 ! interface GigabitEthernet4/7 switchport switchport access vlan 110 ! interface GigabitEthernet4/8 switchport switchport access vlan 110 ! access-list 101 deny tcp any any eq 25 log access-list 101 permit ip any any ! ! logging 1.1.3.5 logging 1.1.3.17 ! banner login ^C This is a router, and you cannot have it. Log off now while you still can type. I break the fingers of all tresspassers. ^C alias exec showthang show ip route vrf THANG""".splitlines() # A smaller version of c01... c02 = """policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! interface GigabitEthernet4/1 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 !""".splitlines() ## For historical reasons, I'm use c03 for configs/sample_01.ios (i.e. c01 was ## already taken) c03 = """! service timestamps debug datetime msec localtime show-timezone service timestamps log datetime msec localtime show-timezone ! errdisable recovery cause bpduguard errdisable recovery interval 400 ! aaa new-model ! ip vrf TEST_100_001 route-target 100:1 rd 100:1 ! interface Serial 1/0 description Uplink to SBC F923X2K425 bandwidth 1500 clock rate 1500 delay 70 encapsulation ppp ip address 1.1.1.1 255.255.255.252 ! interface Serial 1/1 description Uplink to AT&T encapsulation hdlc ip address 1.1.1.9 255.255.255.254 hold-queue 1000 in hold-queue 1000 out mpls mtu 1540 ip mtu 1500 mpls ip ! interface GigabitEthernet4/1 description switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 ! interface GigabitEthernet4/2 switchport switchport access vlan 100 switchport voice vlan 150 power inline static max 7000 speed 100 duplex full ! interface GigabitEthernet4/3 mtu 9216 switchport switchport access vlan 100 switchport voice vlan 150 ! interface GigabitEthernet4/4 shutdown ! interface GigabitEthernet4/5 switchport switchport access vlan 110 switchport port-security switchport port-security maximum 3 switchport port-security mac-address sticky switchport port-security mac-address 1000.2000.3000 switchport port-security mac-address 1000.2000.3001 switchport port-security mac-address 1000.2000.3002 switchport port-security violation shutdown ! 
interface GigabitEthernet4/6 description Simulate a Catalyst6500 access port switchport switchport access vlan 110 switchport mode access switchport nonegotiate switchport port-security switchport port-security maximum 2 switchport port-security violation restrict switchport port-security aging type inactivity switchport port-security aging time 5 spanning-tree portfast spanning-tree portfast bpduguard storm-control action shutdown storm-control broadcast level 0.40 storm-control multicast level 0.35 ! interface GigabitEthernet4/7 description Dot1Q trunk allowing vlans 2-4,7,10,11-19,21-4094 switchport switchport trunk encapsulation dot1q switchport mode trunk switchport trunk native vlan 4094 switchport trunk allowed vlan remove 1,5-10,20 switchport trunk allowed vlan add 7,10 switchport nonegotiate ! interface GigabitEthernet4/8.120 no switchport encapsulation dot1q 120 ip vrf forwarding TEST_100_001 ip address 1.1.2.254 255.255.255.0 ! interface ATM5/0/0 no ip address no ip redirects no ip unreachables no ip proxy-arp load-interval 30 carrier-delay msec 100 no atm ilmi-keepalive bundle-enable max-reserved-bandwidth 100 hold-queue 500 in ! interface ATM5/0/0.32 point-to-point ip address 1.1.1.5 255.255.255.252 no ip redirects no ip unreachables no ip proxy-arp ip accounting access-violations pvc 0/32 vbr-nrt 704 704 ! interface ATM5/0/1 shutdown ! router ospf 100 vrf TEST_100_001 router-id 1.1.2.254 network 1.1.2.0 0.0.0.255 area 0 ! policy-map QOS_1 class GOLD priority percent 10 ! class SILVER bandwidth 30 random-detect ! class BRONZE random-detect ! access-list 101 deny tcp any any eq 25 log access-list 101 permit ip any any ! ! logging 1.1.3.5 logging 1.1.3.17 ! banner login ^C This is a router, and you cannot have it. Log off now while you still can type. I break the fingers of all tresspassers. ^C ! 
alias exec showthang show ip route vrf THANG""".splitlines() f01 = """ltm virtual ACME { destination 192.168.1.191:http ip-protocol tcp mask 255.255.255.255 pool pool1 profiles { http { } tcp { } } rules { MOBILE } source 0.0.0.0/0 source-address-translation { type automap } translate-address enabled translate-port enabled vs-index 17 }""".splitlines() # Using configs/sample_02.f5 f02 = """ ltm profile udp DNS-UDP { app-service none datagram-load-balancing disabled idle-timeout 31 } ltm rule contrail-monitor { when HTTP_REQUEST { if {[active_members APN-DNS-TCP] > 0 & [active_members APN-DNS-UDP] > 0 } { HTTP::respond 200 content "up" } } } ltm rule contrail-monitor1 { when HTTP_REQUEST { if {[active_members APN-DNS-TCP] >= 0 & [active_members APN-DNS-UDP] >= 0 } { HTTP::respond 200 content "up" } } } ltm tacdb licenseddb licensed-tacdb { partition none } ltm virtual ACME_VIP { destination 192.168.1.191:http ip-protocol tcp mask 255.255.255.255 pool pool1 profiles { http { } tcp { } } rules { MOBILE } source 0.0.0.0/0 source-address-translation { type automap } translate-address enabled translate-port enabled vs-index 17 } sys state-mirroring { } sys syslog { include " template t_remotetmpl { template (\"<$PRI>$STAMP $HOST $FACILITY[$PID]: $MSGONLY\"); template_escape(no); }; filter f_remote_loghost { level(info..emerg); }; destination d_remote_loghost { udp(\"102.223.51.181\" port(519) template(t_remotetmpl)); }; log { source(s_syslog_pipe); filter(f_remote_loghost); destination(d_remote_loghost); }; " remote-servers { JSA { host 102.223.51.181 } } } sys url-db download-schedule urldb { } """.splitlines() j01 = """## Last commit: 2015-06-28 13:00:59 CST by mpenning system { host-name TEST01_EX; domain-name pennington.net; domain-search [ pennington.net lab.pennington.net ]; location { country-code 001; building HQ_005; floor 1; } root-authentication { encrypted-password "$1$y7ArHxKU$zUbdeLfBirgkCsKiOJ5Qa0"; ## SECRET-DATA } name-server { 172.16.3.222; } login { announcement "Test Lab Switch"; message "Unauthorized access is prohibited"; user mpenning { full-name "Mike Pennington"; uid 1000; class super-user; authentication { encrypted-password "$1$y7ArHxKU$zUbdeLfBirgkCsKiOJ5Qa0"; ## SECRET-DATA } } } services { ssh { root-login allow; } telnet; web-management { http; } } syslog { user * { any emergency; } file messages { any notice; authorization info; } file interactive-commands { interactive-commands any; } } ntp { Management { vlan-id 1; interface { ge-0/0/0.0; ge-0/0/1.0; ge-0/0/2.0; ge-0/0/3.0; } } VLAN_FOO { vlan-id 5; } vlan1 { vlan-id 1; l3-interface vlan.1; } vlan800 { vlan-id 800; } } ethernet-switching-options { storm-control { interface all; } } interfaces { ge-0/0/0 { unit 0 { family ethernet-switching { port-mode access; vlan { members VLAN_FOO; } } } } ge-0/0/1 { unit 0 { family ethernet-switching { port-mode trunk; vlan { members all; } native-vlan-id 1; } } } vlan { unit 0 { family inet { address 172.16.15.5/22; } } } } routing-options { static { route 0.0.0.0/0 next-hop 172.16.12.1; route 192.168.36.0/25 next-hop 172.16.12.1; } }""".splitlines() a01 = """hostname TEST-FW ! name 1.1.2.20 loghost01 name 1.1.3.10 dmzsrv00 name 1.1.3.11 dmzsrv01 name 1.1.3.12 dmzsrv02 name 1.1.3.13 dmzsrv03 ! interface Ethernet0/0 description Uplink to SBC F923X2K425 nameif OUTSIDE security-level 0 delay 70 ip address 1.1.1.1 255.255.255.252 ! interface Ethernet0/1 nameif INSIDE security-level 100 ip address 1.1.2.1 255.255.255.0 ! interface Ethernet0/2 switchport access vlan 100 ! 
interface VLAN100 nameif DMZ security-level 50 ip address 1.1.3.1 255.255.255.0 ! object-group network ANY_addrs network-object 0.0.0.0 0.0.0.0 ! object-group network INSIDE_addrs1 network-object host 1.1.2.1 network-object 1.1.2.2 255.255.255.255 network-object 1.1.2.0 255.255.255.0 ! object-group network INSIDE_addrs1 network-object host 1.1.2.1 network-object 1.1.2.2 255.255.255.255 network-object 1.1.2.0 255.255.255.0 ! object-group service DNS_svc service-object udp destination eq dns ! object-group service NTP_svc service-object udp destination eq ntp ! object-group service FTP_svc service-object tcp destination eq ftp ! object-group service HTTP_svc service-object tcp destination eq http ! object-group service HTTPS_svc service-object tcp destination eq https ! access-list INSIDE_in extended permit object-group FTP_svc object-group INSIDE_addrs1 object-group ANY_addrs log access-list INSIDE_in remark Overlap for test purposes access-list INSIDE_in extended permit ip object-group INSIDE_addrs1 object-group ANY_addrs log access-list INSIDE_in extended deny ip any any log ! ! clock timezone CST -6 clock summer-time CDT recurring ! logging enable logging timestamp logging buffer-size 1048576 logging buffered informational logging trap informational logging asdm informational logging facility 22 logging host INSIDE loghost01 no logging message 302021 ! access-group OUTSIDE_in in interface OUTSIDE access-group INSIDE_in in interface INSIDE !""".splitlines() a02 = """: Saved : Written by mpenning at 05:37:43.184 CDT Sun Jun 29 2015 ! ASA Version 9.0(3) ! command-alias exec slog show log | i Deny|denied command-alias exec sacl sh access-list INSIDE_out | e hitcnt=0 |remark|elements hostname fw domain-name pennington.net enable password 2KFQnbNIdI.2KYOU encrypted xlate per-session deny tcp any4 any4 xlate per-session deny tcp any4 any6 xlate per-session deny tcp any6 any4 xlate per-session deny tcp any6 any6 xlate per-session deny udp any4 any4 eq domain xlate per-session deny udp any4 any6 eq domain xlate per-session deny udp any6 any4 eq domain xlate per-session deny udp any6 any6 eq domain passwd 2KFQnbNIdI.2KYOU encrypted names name 192.0.2.13 Machine01 description machine01 name 192.0.2.17 Machine02_Windows name 10.0.0.6 Machine03 name 74.125.130.125 GTalk01 description Google talk server name 74.125.134.125 GTalk02 description Google talk server name 74.125.139.125 GTalk03 description Google Talk server name 74.125.142.125 GTalk04 description Google Talk server name 74.125.192.125 GTalk05 description Google Talk server name 74.125.140.125 GTalk06 description Google Talk server name 74.125.137.125 GTalk07 name 74.125.138.125 GTalk08 name 74.125.141.125 GTalk09 name 74.125.136.125 GTalk10 name 74.125.135.125 GTalk11 name 108.160.160.0 AS19679_Dropbox__108-160-160-0__20 name 199.47.216.0 AS19679_Dropbox__199.47.216.0__22 name 173.194.64.109 GmailSMTP01 name 173.194.64.108 GmailSMTP02 name 128.223.51.103 route-views.oregon-ix.net description Route-Views route server ip local pool SSL_VPN_ADDRS 10.1.1.240-10.1.1.241 mask 255.255.255.0 ! interface Ethernet0/0 description Internet ISP switchport access vlan 100 ! interface Ethernet0/1 switchport access vlan 200 ! interface Ethernet0/2 switchport access vlan 200 shutdown ! interface Ethernet0/3 switchport access vlan 200 ! interface Ethernet0/4 switchport access vlan 200 ! interface Ethernet0/5 switchport access vlan 200 ! interface Ethernet0/6 switchport access vlan 200 ! interface Ethernet0/7 shutdown ! 
interface Vlan1 no nameif no security-level no ip address ! interface Vlan100 mac-address 0030.dead.beef nameif OUTSIDE security-level 0 ip address dhcp setroute ! interface Vlan200 nameif INSIDE security-level 100 ip address 192.0.2.1 255.255.255.0 ! banner motd banner motd Test banner for $(hostname) banner motd banner motd ******************************* boot system disk0:/asa903-k8.bin ftp mode passive clock timezone CST -6 clock summer-time CDT recurring dns domain-lookup INSIDE dns server-group DefaultDNS name-server Machine01 domain-name pennington.net object network GTalk01 host 74.125.130.125 description Created during name migration object network GTalk02 host 74.125.134.125 description Created during name migration object network GTalk03 host 74.125.139.125 description Created during name migration object network GTalk04 host 74.125.142.125 description Created during name migration object network GTalk05 host 74.125.192.125 description Created during name migration object network GTalk06 host 74.125.140.125 description Created during name migration object network GTalk07 host 74.125.137.125 description Created during name migration object network GTalk08 host 74.125.138.125 description Created during name migration object network GTalk09 host 74.125.141.125 description Created during name migration object network GTalk10 host 74.125.136.125 description Created during name migration object network GTalk11 host 74.125.135.125 description Created during name migration object network AS19679_Dropbox__108-160-160-0__20 subnet 108.160.160.0 255.255.240.0 description Created during name migration object network AS19679_Dropbox__199.47.216.0__22 subnet 199.47.216.0 255.255.252.0 description Created during name migration object network Machine01 host 192.0.2.5 description Created during name migration object network obj_any subnet 0.0.0.0 0.0.0.0 object network Machine02_Windows host 192.0.2.17 description Created during name migration object-group network GoogleTalk network-object object GTalk01 network-object object GTalk02 network-object object GTalk03 network-object object GTalk04 network-object object GTalk05 network-object object GTalk06 network-object object GTalk07 network-object object GTalk08 network-object object GTalk09 network-object object GTalk10 network-object object GTalk11 object-group service GoogleTalkPorts service-object tcp destination eq 5222 service-object tcp destination eq https service-object udp destination range 19302 19309 object-group network Inside network-object 192.0.2.0 255.255.255.0 network-object 192.0.22.0 255.255.255.0 network-object 192.0.23.0 255.255.255.0 object-group network DROPBOX_AS19679 network-object object AS19679_Dropbox__108-160-160-0__20 network-object object AS19679_Dropbox__199.47.216.0__22 object-group network GOOGLE_addrs description dig -t TXT _netblocks.google.com 8.8.8.8 network-object 216.239.32.0 255.255.224.0 network-object 64.233.160.0 255.255.224.0 network-object 66.249.80.0 255.255.240.0 network-object 72.14.192.0 255.255.192.0 network-object 209.85.128.0 255.255.128.0 network-object 66.102.0.0 255.255.240.0 network-object 74.125.0.0 255.255.0.0 network-object 64.18.0.0 255.255.240.0 network-object 207.126.144.0 255.255.240.0 network-object 173.194.0.0 255.255.0.0 object-group network SSH_addrs network-object 192.168.1.0 255.255.255.0 object-group network ANY_addrs network-object 0.0.0.0 0.0.0.0 object-group network INSIDE_addrs network-object 192.0.2.0 255.255.255.0 network-object 10.0.0.0 255.0.0.0 object-group service 
GOOGLE_svc description Google's push service for Android service-object tcp destination eq www service-object tcp destination eq https service-object tcp destination eq 5228 service-object tcp destination eq 5222 service-object tcp destination eq 587 object-group service TELNET_svc service-object tcp destination eq telnet object-group service WHOIS_svc service-object tcp destination eq whois object-group service SSH_svc service-object tcp destination eq ssh object-group service WEB_svc description Standard web services - http, https, ftp service-object tcp destination eq ftp service-object tcp destination eq www service-object tcp destination eq https service-object icmp object-group service DNS_svc service-object udp destination eq domain service-object tcp destination eq domain object-group network MACHINE01_addrs network-object object Machine01 object-group service ANDROID_svc description Google's push service for Android service-object tcp destination eq 5228 object-group service GMAILSMTP_svc service-object tcp destination eq 2525 object-group service NTP_svc service-object udp destination eq ntp object-group service SKYPE_svc service-object udp destination eq 5555 object-group service XBOX_svc service-object tcp destination eq domain service-object udp destination eq domain service-object udp destination eq 88 service-object tcp destination eq 3074 service-object udp destination eq 3074 object-group network ANY object-group service NaverLine_svc service-object udp destination eq 11000 service-object udp destination range 9401 9405 object-group network NaverLine_addrs network-object 174.35.127.0 255.255.255.0 object-group network Facebook_addrs network-object 66.220.144.0 255.255.240.0 network-object 69.63.176.0 255.255.248.0 network-object 69.63.184.0 255.255.248.0 network-object 69.171.224.0 255.255.240.0 network-object 69.171.239.0 255.255.255.0 network-object 69.171.240.0 255.255.240.0 network-object 69.171.253.0 255.255.255.0 network-object 69.171.255.0 255.255.255.0 network-object 74.119.76.0 255.255.252.0 network-object 103.4.96.0 255.255.252.0 network-object 173.252.64.0 255.255.192.0 network-object 204.15.20.0 255.255.252.0 network-object 31.13.24.0 255.255.248.0 network-object 31.13.64.0 255.255.192.0 network-object 31.13.96.0 255.255.224.0 object-group service IP_SLA_PathTrace_svc service-object udp destination range 33400 33499 object-group service FTP_svc service-object tcp destination eq ftp object-group service TeamViewerPorts service-object tcp destination eq 5938 object-group service SSLVPN_svc service-object udp destination eq 443 object-group service TEST_PORTS tcp port-object eq domain port-object eq smtp access-list SPLIT_TUNNEL_NETS remark [[ destinations available via the VPN ]] access-list SPLIT_TUNNEL_NETS standard permit 192.0.2.0 255.255.255.0 access-list NO_SSLVPN_NAT remark [[ prevent inadvertent nat of sslvpn traffic ]] access-list NO_SSLVPN_NAT extended permit ip 192.0.2.0 255.255.255.0 192.0.2.0 255.255.255.0 access-list INSIDE_in extended deny object-group SKYPE_svc object-group INSIDE_addrs object-group ANY_addrs log disable access-list INSIDE_in extended permit object-group GOOGLE_svc object-group INSIDE_addrs object-group GOOGLE_addrs log access-list INSIDE_in extended permit object-group ANDROID_svc object-group INSIDE_addrs object-group GOOGLE_addrs log access-list INSIDE_in extended permit object-group IP_SLA_PathTrace_svc any host 4.2.2.2 log access-list INSIDE_in extended permit object-group DNS_svc object-group INSIDE_addrs object-group 
ANY_addrs log access-list INSIDE_in extended permit object-group NTP_svc object-group INSIDE_addrs object-group ANY_addrs log access-list INSIDE_in extended permit object-group TELNET_svc object-group INSIDE_addrs host 128.223.51.103 log access-list INSIDE_in extended permit object-group FTP_svc object-group INSIDE_addrs object-group ANY_addrs log access-list INSIDE_in extended permit object-group WEB_svc object-group INSIDE_addrs object-group ANY_addrs log access-list INSIDE_in extended permit object-group SSH_svc object-group INSIDE_addrs object-group SSH_addrs log access-list INSIDE_in extended permit object-group GMAILSMTP_svc object-group TSUNAMI_addrs object-group ANY_addrs log access-list INSIDE_in extended permit object-group WHOIS_svc object-group TSUNAMI_addrs object-group ANY_addrs log access-list INSIDE_in extended deny ip any4 any4 log access-list ANY extended permit ip object-group Inside any4 access-list ANY extended permit ip any4 object-group Inside access-list VOIP extended permit object-group GoogleTalkPorts object-group Inside object-group GoogleTalk access-list VOIP extended permit object-group GoogleTalkPorts object-group GoogleTalk object-group Inside access-list MAINTENANCE extended deny ip any4 any4 log access-list OUTSIDE_in extended deny ip host 4.2.2.2 any4 log access-list OUTSIDE_in extended permit icmp any4 0.0.0.0 0.0.0.0 unreachable log interval 1 access-list OUTSIDE_in extended permit icmp any4 0.0.0.0 0.0.0.0 time-exceeded log interval 1 access-list OUTSIDE_in extended deny ip any4 any4 log pager lines 23 logging enable logging timestamp logging buffer-size 1048576 logging buffered informational logging trap informational logging asdm informational logging facility 22 logging host INSIDE Machine01 logging class sys buffered informational no logging message 302021 no logging message 302020 mtu OUTSIDE 1500 mtu INSIDE 1500 ip verify reverse-path interface INSIDE icmp unreachable rate-limit 1 burst-size 1 asdm image disk0:/asdm-645.bin no asdm history enable arp timeout 14400 no arp permit-nonconnected ! 
object network obj_any nat (INSIDE,OUTSIDE) dynamic interface access-group OUTSIDE_in in interface OUTSIDE access-group INSIDE_in in interface INSIDE route INSIDE 10.0.0.0 255.0.0.0 192.0.2.2 1 timeout xlate 3:00:00 timeout pat-xlate 0:00:30 timeout conn 1:00:00 half-closed 0:59:00 udp 0:02:00 icmp 0:00:02 timeout sunrpc 0:10:00 h323 0:05:00 h225 1:00:00 mgcp 0:05:00 mgcp-pat 0:05:00 timeout sip 0:30:00 sip_media 0:02:00 sip-invite 0:03:00 sip-disconnect 0:02:00 timeout sip-provisional-media 0:02:00 uauth 0:05:00 absolute timeout tcp-proxy-reassembly 0:01:00 timeout floating-conn 0:00:00 dynamic-access-policy-record DfltAccessPolicy user-identity default-domain LOCAL aaa authentication ssh console LOCAL aaa authentication enable console LOCAL aaa authentication http console LOCAL aaa authorization command LOCAL aaa local authentication attempts max-fail 16 filter java 1-65535 192.0.2.0 255.255.255.0 0.0.0.0 0.0.0.0 http server enable http 192.0.2.0 255.255.255.0 INSIDE snmp-server host INSIDE Machine01 poll community public snmp-server location ServerRoom snmp-server contact [email protected] snmp-server community public snmp-server enable traps snmp authentication linkup linkdown coldstart crypto ipsec security-association pmtu-aging infinite crypto ca trustpoint LOCAL_CERT_fw enrollment self fqdn fw.pennington.net subject-name CN=fw.pennington.net crl configure crypto ca trustpool policy telnet timeout 5 ssh scopy enable ssh 192.0.2.0 255.255.255.0 INSIDE ssh 10.0.0.0 255.0.0.0 INSIDE ssh timeout 60 ssh version 2 console timeout 5 no vpn-addr-assign aaa no vpn-addr-assign dhcp dhcpd dns 68.94.156.1 Machine01 dhcpd lease 604800 dhcpd domain pennington.net dhcpd auto_config OUTSIDE ! threat-detection basic-threat threat-detection scanning-threat shun duration 30 threat-detection statistics host threat-detection statistics port threat-detection statistics protocol threat-detection statistics access-list no threat-detection statistics tcp-intercept ntp server 17.151.16.20 ntp server 17.151.16.21 ntp server 17.151.16.22 ntp server 17.151.16.23 group-policy SSL_VPN_Policy01 internal group-policy SSL_VPN_Policy01 attributes dns-server value 192.0.2.13 vpn-idle-timeout none vpn-filter none vpn-tunnel-protocol ssl-client ssl-clientless split-tunnel-policy tunnelspecified split-tunnel-network-list value SPLIT_TUNNEL_NETS default-domain value pennington.net webvpn anyconnect keep-installer installed anyconnect ssl rekey time 30 anyconnect ssl rekey method ssl anyconnect ask none default anyconnect username mpenning password dXRTaA5wrZ3OL8gz encrypted privilege 15 tunnel-group DefaultWEBVPNGroup general-attributes address-pool SSL_VPN_ADDRS default-group-policy SSL_VPN_Policy01 ! ! policy-map type inspect dns preset_dns_map parameters message-length maximum client auto message-length maximum 512 policy-map global_policy class inspection_default inspect dns preset_dns_map inspect h323 h225 inspect h323 ras inspect rsh inspect rtsp inspect esmtp inspect sqlnet inspect skinny inspect sunrpc inspect xdmcp inspect sip inspect netbios inspect tftp inspect ip-options inspect icmp inspect http ! 
service-policy global_policy global prompt hostname context no call-home reporting anonymous call-home profile CiscoTAC-1 no active destination address http https://tools.cisco.com/its/service/oddce/services/DDCEService destination address email [email protected] destination transport-method http subscribe-to-alert-group diagnostic subscribe-to-alert-group environment subscribe-to-alert-group inventory periodic monthly subscribe-to-alert-group configuration periodic monthly subscribe-to-alert-group telemetry periodic daily Cryptochecksum:571d01b7b08342e35db838e9acec00f6 : end""".splitlines() n01 = """ feature tacacs+ feature interface-vlan feature vpc feature fex feature lacp feature lldp feature ospf no feature telnet ip domain-lookup ip domain-name pennington.net ip name-server 10.0.0.10 vrf context management ip route 0.0.0.0/0 10.0.0.1 vrf context vpc-peerkeepalive tacacs-server key 0 DontTreadOnMe ip tacacs source-interface Vlan10 tacacs-server host 10.0.0.32 tacacs-server host 10.0.0.33 aaa group server tacacs+ TACACS_GROUP server 10.0.0.32 server 10.0.0.33 use-vrf management source-interface mgmt0 aaa authentication login default group TACACS_GROUP aaa authentication login console group TACACS_GROUP aaa authorization commands default group TACACS_GROUP aaa accounting default group TACACS_GROUP aaa authentication login error-enable logging event link-status default vpc domain 999 role priority 100 system-priority 1 auto-recovery peer-keepalive destination 1.1.1.2 fex 115 desc FEX115 pinning max-links 1 interface loopback0 ip address 10.1.1.1/32 interface mgmt0 ip address 10.0.0.5/24 interface port-channel1 vpc peer-link switchport mode trunk spanning-tree port type network description [vPC PEER LINK] interface port-channel21 description Uplink to core switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,155 mtu 9216 vpc 21 interface port-channel115 switchport mode fex-fabric fex associate 115 interface Ethernet1/1 switchport mode trunk spanning-tree port type network channel-group 1 mode active interface Ethernet1/2 switchport mode trunk spanning-tree port type network channel-group 1 mode active interface Ethernet1/3 ip address 192.0.2.0/31 interface Ethernet1/4 switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,15 channel-group 21 mode active mtu 9216 interface Ethernet1/5 switchport mode trunk switchport trunk native vlan 999 switchport trunk allowed vlan 13,31-38,15 channel-group 21 mode active mtu 9216 interface Ethernet1/6 switchport mode fex-fabric fex associate 115 channel-group 115 interface Ethernet1/7 switchport mode access switchport access vlan 100 mtu 9216 interface Ethernet1/8 switchport mode access switchport access vlan 102 mtu 9216 interface Ethernet1/9 ip address 10.1.2.6/30 mtu 9216 interface Ethernet1/10 encapsulation dot1Q 200 bandwidth 100000000 delay 200 beacon ip address 10.1.2.2/30 mpls ip mtu 9216 """.splitlines() @pytest.fixture(scope="session") def c01_default_gigethernets(request): yield config_c01_default_gige @pytest.fixture(scope="session") def c01_insert_serial_replace(request): yield config_c01_insert_serial_replace @pytest.fixture(scope="function") def parse_c01(request): """Preparsed c01"""
parse_c01 = CiscoConfParse(c01, factory=False)
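To show how the target line above fits into the fixture it completes, here is an illustrative sketch: only the parse_c01 = CiscoConfParse(c01, factory=False) statement comes from the record itself; the stand-in c01 list, the yield, and the example test are assumptions about how a function-scoped pytest fixture of this kind is typically finished and consumed.

import pytest
from ciscoconfparse2 import CiscoConfParse

# Stand-in for the module-level c01 list defined in the conftest content above
c01 = [
    "interface GigabitEthernet4/1",
    " switchport",
    " switchport access vlan 100",
]

@pytest.fixture(scope="function")
def parse_c01(request):
    """Preparsed c01"""
    parse_c01 = CiscoConfParse(c01, factory=False)  # the record's target completion
    yield parse_c01                                 # assumed continuation

def test_c01_has_interfaces(parse_c01):
    # Hypothetical consumer: any test requesting the fixture gets the parsed config
    assert len(parse_c01.find_objects(r"^interface")) > 0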
0
2023-12-01 18:43:27+00:00
24k
zerolink-io/zerolink-python
zerolink/req.py
[ { "identifier": "settings", "path": "zerolink/settings.py", "snippet": " CONFIG_FILE = os.path.join(os.environ[\"APPDATA\"], \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(os.environ[\"HOME\"], \".config\", \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(\n os.environ[\"HOME\"], \"Library\", \"Application Support\", \"zerolink\", \"config\"\n )\ndef create_config() -> None:\ndef get_config() -> configparser.ConfigParser:\ndef get_config_path() -> str:\ndef get_config_var(var: str) -> str:\ndef write_config_var(var: str, value: str):\ndef write_api_key(api_key: str):\ndef read_api_key() -> Optional[str]:" }, { "identifier": "APIError", "path": "zerolink/exc.py", "snippet": "class APIError(Exception):\n def __init__(self, message: str) -> None:\n self.message = message\n\n def __str__(self) -> str:\n return self.message" }, { "identifier": "AuthenticationError", "path": "zerolink/exc.py", "snippet": "class AuthenticationError(Exception):\n def __init__(self) -> None:\n pass\n\n def __str__(self) -> str:\n return \"No API key. Please run `zerolink key` or set the ZEROLINK_API_KEY environment variable\"" }, { "identifier": "Client", "path": "zerolink_client/client.py", "snippet": "class Client:\n \"\"\"A class for keeping track of data related to the API\n\n The following are accepted as keyword arguments and will be used to construct httpx Clients internally:\n\n ``base_url``: The base URL for the API, all requests are made to a relative path to this URL\n\n ``cookies``: A dictionary of cookies to be sent with every request\n\n ``headers``: A dictionary of headers to be sent with every request\n\n ``timeout``: The maximum amount of a time a request can take. API functions will raise\n httpx.TimeoutException if this is exceeded.\n\n ``verify_ssl``: Whether or not to verify the SSL certificate of the API server. This should be True in production,\n but can be set to False for testing purposes.\n\n ``follow_redirects``: Whether or not to follow redirects. Default value is False.\n\n ``httpx_args``: A dictionary of additional arguments to be passed to the ``httpx.Client`` and ``httpx.AsyncClient`` constructor.\n\n\n Attributes:\n raise_on_unexpected_status: Whether or not to raise an errors.UnexpectedStatus if the API returns a\n status code that was not documented in the source OpenAPI document. 
Can also be provided as a keyword\n argument to the constructor.\n \"\"\"\n\n raise_on_unexpected_status: bool = field(default=False, kw_only=True)\n _base_url: str\n _cookies: Dict[str, str] = field(factory=dict, kw_only=True)\n _headers: Dict[str, str] = field(factory=dict, kw_only=True)\n _timeout: Optional[httpx.Timeout] = field(default=None, kw_only=True)\n _verify_ssl: Union[str, bool, ssl.SSLContext] = field(default=True, kw_only=True)\n _follow_redirects: bool = field(default=False, kw_only=True)\n _httpx_args: Dict[str, Any] = field(factory=dict, kw_only=True)\n _client: Optional[httpx.Client] = field(default=None, init=False)\n _async_client: Optional[httpx.AsyncClient] = field(default=None, init=False)\n\n def with_headers(self, headers: Dict[str, str]) -> \"Client\":\n \"\"\"Get a new client matching this one with additional headers\"\"\"\n if self._client is not None:\n self._client.headers.update(headers)\n if self._async_client is not None:\n self._async_client.headers.update(headers)\n return evolve(self, headers={**self._headers, **headers})\n\n def with_cookies(self, cookies: Dict[str, str]) -> \"Client\":\n \"\"\"Get a new client matching this one with additional cookies\"\"\"\n if self._client is not None:\n self._client.cookies.update(cookies)\n if self._async_client is not None:\n self._async_client.cookies.update(cookies)\n return evolve(self, cookies={**self._cookies, **cookies})\n\n def with_timeout(self, timeout: httpx.Timeout) -> \"Client\":\n \"\"\"Get a new client matching this one with a new timeout (in seconds)\"\"\"\n if self._client is not None:\n self._client.timeout = timeout\n if self._async_client is not None:\n self._async_client.timeout = timeout\n return evolve(self, timeout=timeout)\n\n def set_httpx_client(self, client: httpx.Client) -> \"Client\":\n \"\"\"Manually the underlying httpx.Client\n\n **NOTE**: This will override any other settings on the client, including cookies, headers, and timeout.\n \"\"\"\n self._client = client\n return self\n\n def get_httpx_client(self) -> httpx.Client:\n \"\"\"Get the underlying httpx.Client, constructing a new one if not previously set\"\"\"\n if self._client is None:\n self._client = httpx.Client(\n base_url=self._base_url,\n cookies=self._cookies,\n headers=self._headers,\n timeout=self._timeout,\n verify=self._verify_ssl,\n follow_redirects=self._follow_redirects,\n **self._httpx_args,\n )\n return self._client\n\n def __enter__(self) -> \"Client\":\n \"\"\"Enter a context manager for self.client—you cannot enter twice (see httpx docs)\"\"\"\n self.get_httpx_client().__enter__()\n return self\n\n def __exit__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Exit a context manager for internal httpx.Client (see httpx docs)\"\"\"\n self.get_httpx_client().__exit__(*args, **kwargs)\n\n def set_async_httpx_client(self, async_client: httpx.AsyncClient) -> \"Client\":\n \"\"\"Manually the underlying httpx.AsyncClient\n\n **NOTE**: This will override any other settings on the client, including cookies, headers, and timeout.\n \"\"\"\n self._async_client = async_client\n return self\n\n def get_async_httpx_client(self) -> httpx.AsyncClient:\n \"\"\"Get the underlying httpx.AsyncClient, constructing a new one if not previously set\"\"\"\n if self._async_client is None:\n self._async_client = httpx.AsyncClient(\n base_url=self._base_url,\n cookies=self._cookies,\n headers=self._headers,\n timeout=self._timeout,\n verify=self._verify_ssl,\n follow_redirects=self._follow_redirects,\n **self._httpx_args,\n )\n return 
self._async_client\n\n async def __aenter__(self) -> \"Client\":\n \"\"\"Enter a context manager for underlying httpx.AsyncClient—you cannot enter twice (see httpx docs)\"\"\"\n await self.get_async_httpx_client().__aenter__()\n return self\n\n async def __aexit__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Exit a context manager for underlying httpx.AsyncClient (see httpx docs)\"\"\"\n await self.get_async_httpx_client().__aexit__(*args, **kwargs)" }, { "identifier": "finetune", "path": "zerolink_client/api/default/finetune.py", "snippet": "def _get_kwargs(\n *,\n file: Union[File, str],\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateTuneJobResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateTuneJobResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Response[Union[CreateTuneJobResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Optional[Union[CreateTuneJobResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Response[Union[CreateTuneJobResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Optional[Union[CreateTuneJobResponse, HTTPValidationError]]:" }, { "identifier": "get_models_models_get", "path": "zerolink_client/api/default/get_models_models_get.py", "snippet": "def _get_kwargs() -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[ModelList]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[ModelList]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[ModelList]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[ModelList]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[ModelList]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[ModelList]:" }, { "identifier": "desc_entity_id", "path": "zerolink_client/api/entity/desc_entity_id.py", "snippet": "def _get_kwargs(\n id: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[Entity, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[Entity, HTTPValidationError]]:\ndef sync_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Entity, HTTPValidationError]]:\ndef sync(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Entity, HTTPValidationError]]:\nasync def asyncio_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Entity, HTTPValidationError]]:\nasync def asyncio(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Entity, HTTPValidationError]]:" }, { "identifier": "desc_entity_ontology", "path": "zerolink_client/api/entity/desc_entity_ontology.py", "snippet": "def _get_kwargs(\n id: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, 
client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[Any, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[Any, HTTPValidationError]]:\ndef sync_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Any, HTTPValidationError]]:\ndef sync(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Any, HTTPValidationError]]:\nasync def asyncio_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Any, HTTPValidationError]]:\nasync def asyncio(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Any, HTTPValidationError]]:" }, { "identifier": "lookup_entity", "path": "zerolink_client/api/entity/lookup_entity.py", "snippet": "def _get_kwargs(\n name: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Entity\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"Entity\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Entity\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Entity\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Entity\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Entity\"]]]:" }, { "identifier": "lookup_relation", "path": "zerolink_client/api/entity/lookup_relation.py", "snippet": "def _get_kwargs(\n name: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Relation\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"Relation\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Relation\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Relation\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Relation\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Relation\"]]]:" }, { "identifier": "search_entity", "path": "zerolink_client/api/entity/search_entity.py", "snippet": "def _get_kwargs(\n name: str,\n *,\n limit: Union[Unset, int] = 10,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Match\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"Match\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Response[Union[HTTPValidationError, 
List[\"Match\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Optional[Union[HTTPValidationError, List[\"Match\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Response[Union[HTTPValidationError, List[\"Match\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Optional[Union[HTTPValidationError, List[\"Match\"]]]:" }, { "identifier": "extract_text", "path": "zerolink_client/api/extract/extract_text.py", "snippet": "def _get_kwargs(\n *,\n body: TextExtract,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[AssertionResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[AssertionResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Response[Union[AssertionResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Optional[Union[AssertionResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Response[Union[AssertionResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Optional[Union[AssertionResponse, HTTPValidationError]]:" }, { "identifier": "create_userattribute", "path": "zerolink_client/api/fact/create_userattribute.py", "snippet": "def _get_kwargs(\n *,\n body: CreateAttribute,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[GenericResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[GenericResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Response[Union[GenericResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Optional[Union[GenericResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Response[Union[GenericResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Optional[Union[GenericResponse, HTTPValidationError]]:" }, { "identifier": "create_userentity", "path": "zerolink_client/api/fact/create_userentity.py", "snippet": "def _get_kwargs(\n *,\n body: CreateEntity,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateEntityResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateEntityResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: 
CreateEntity,\n session_id: int,\n) -> Response[Union[CreateEntityResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateEntity,\n session_id: int,\n) -> Optional[Union[CreateEntityResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateEntity,\n session_id: int,\n) -> Response[Union[CreateEntityResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateEntity,\n session_id: int,\n) -> Optional[Union[CreateEntityResponse, HTTPValidationError]]:" }, { "identifier": "create_userrule", "path": "zerolink_client/api/fact/create_userrule.py", "snippet": "def _get_kwargs(\n *,\n body: CreateRule,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateRuleResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateRuleResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Response[Union[CreateRuleResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Optional[Union[CreateRuleResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Response[Union[CreateRuleResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Optional[Union[CreateRuleResponse, HTTPValidationError]]:" }, { "identifier": "create_usertriple", "path": "zerolink_client/api/fact/create_usertriple.py", "snippet": "def _get_kwargs(\n *,\n body: CreateTriple,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateFactResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateFactResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Response[Union[CreateFactResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Optional[Union[CreateFactResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Response[Union[CreateFactResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Optional[Union[CreateFactResponse, HTTPValidationError]]:" }, { "identifier": "get_triple", "path": "zerolink_client/api/kg/get_triple.py", "snippet": "def _get_kwargs(\n name: str,\n *,\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Triple\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, 
List[\"Triple\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Response[Union[HTTPValidationError, List[\"Triple\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Optional[Union[HTTPValidationError, List[\"Triple\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Response[Union[HTTPValidationError, List[\"Triple\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Optional[Union[HTTPValidationError, List[\"Triple\"]]]:" }, { "identifier": "post_question", "path": "zerolink_client/api/question/post_question.py", "snippet": "def _get_kwargs(\n *,\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, QuestionResponse]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, QuestionResponse]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Response[Union[HTTPValidationError, QuestionResponse]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Optional[Union[HTTPValidationError, QuestionResponse]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Response[Union[HTTPValidationError, QuestionResponse]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Optional[Union[HTTPValidationError, QuestionResponse]]:" }, { "identifier": "create_session", "path": "zerolink_client/api/session/create_session.py", "snippet": "def _get_kwargs(\n user_id: str,\n *,\n name: Union[Unset, str] = UNSET,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[ChatSession, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync_detailed(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Optional[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio_detailed(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Response[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Optional[Union[ChatSession, HTTPValidationError]]:" }, { "identifier": "get_session_entities", "path": "zerolink_client/api/session/get_session_entities.py", "snippet": "def _get_kwargs(\n session_id: int,\n) -> 
Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\ndef sync_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\ndef sync(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\nasync def asyncio_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\nasync def asyncio(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericEntity\"]]]:" }, { "identifier": "get_session_facts", "path": "zerolink_client/api/session/get_session_facts.py", "snippet": "def _get_kwargs(\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\ndef sync_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\ndef sync(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\nasync def asyncio_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\nasync def asyncio(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericTriple\"]]]:" }, { "identifier": "get_user_session", "path": "zerolink_client/api/session/get_user_session.py", "snippet": "def _get_kwargs(\n user_id: str,\n session_name: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[ChatSession, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync_detailed(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio_detailed(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[ChatSession, HTTPValidationError]]:" }, { "identifier": "create_user", "path": "zerolink_client/api/user/create_user.py", "snippet": "def _get_kwargs() -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[CreateUser]:\ndef _build_response(*, client: Union[AuthenticatedClient, 
Client], response: httpx.Response) -> Response[CreateUser]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[CreateUser]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[CreateUser]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[CreateUser]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[CreateUser]:" }, { "identifier": "ChatSession", "path": "zerolink_client/models/chat_session.py", "snippet": "class ChatSession:\n \"\"\"A user chat session.\n\n Attributes:\n id (int):\n name (str): The name of the chat session\n index (int):\n requests (List['Req']):\n responses (List['Rep']):\n created_on (datetime.datetime):\n \"\"\"\n\n id: int\n name: str\n index: int\n requests: List[\"Req\"]\n responses: List[\"Rep\"]\n created_on: datetime.datetime\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.rep import Rep\n from ..models.req import Req\n\n id = self.id\n\n name = self.name\n\n index = self.index\n\n requests = []\n for requests_item_data in self.requests:\n requests_item = requests_item_data.to_dict()\n requests.append(requests_item)\n\n responses = []\n for responses_item_data in self.responses:\n responses_item = responses_item_data.to_dict()\n responses.append(responses_item)\n\n created_on = self.created_on.isoformat()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"name\": name,\n \"index\": index,\n \"requests\": requests,\n \"responses\": responses,\n \"created_on\": created_on,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.rep import Rep\n from ..models.req import Req\n\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n name = d.pop(\"name\")\n\n index = d.pop(\"index\")\n\n requests = []\n _requests = d.pop(\"requests\")\n for requests_item_data in _requests:\n requests_item = Req.from_dict(requests_item_data)\n\n requests.append(requests_item)\n\n responses = []\n _responses = d.pop(\"responses\")\n for responses_item_data in _responses:\n responses_item = Rep.from_dict(responses_item_data)\n\n responses.append(responses_item)\n\n created_on = isoparse(d.pop(\"created_on\"))\n\n chat_session = cls(\n id=id,\n name=name,\n index=index,\n requests=requests,\n responses=responses,\n created_on=created_on,\n )\n\n chat_session.additional_properties = d\n return chat_session\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateAttribute", "path": "zerolink_client/models/create_attribute.py", "snippet": "class CreateAttribute:\n \"\"\"\n Attributes:\n subject (str): EID of a builtin entity\n predicate (str): Name of attribute\n attribute (Attribute):\n \"\"\"\n\n subject: str\n predicate: str\n attribute: \"Attribute\"\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.attribute import 
Attribute\n\n subject = self.subject\n\n predicate = self.predicate\n\n attribute = self.attribute.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"subject\": subject,\n \"predicate\": predicate,\n \"attribute\": attribute,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.attribute import Attribute\n\n d = src_dict.copy()\n subject = d.pop(\"subject\")\n\n predicate = d.pop(\"predicate\")\n\n attribute = Attribute.from_dict(d.pop(\"attribute\"))\n\n create_attribute = cls(\n subject=subject,\n predicate=predicate,\n attribute=attribute,\n )\n\n create_attribute.additional_properties = d\n return create_attribute\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateEntity", "path": "zerolink_client/models/create_entity.py", "snippet": "class CreateEntity:\n \"\"\"\n Attributes:\n entity (str): Name of entity\n entity_type (Union[Unset, EntityType]): Entity types are entities that map to base ontological entities in\n Foundation.\n entity_str (Union[Unset, str]): User specified type\n is_class (Union[Unset, bool]): Whether the entity is a class or instance Default: False.\n \"\"\"\n\n entity: str\n entity_type: Union[Unset, EntityType] = UNSET\n entity_str: Union[Unset, str] = UNSET\n is_class: Union[Unset, bool] = False\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n entity = self.entity\n\n entity_type: Union[Unset, str] = UNSET\n if not isinstance(self.entity_type, Unset):\n entity_type = self.entity_type.value\n\n entity_str = self.entity_str\n\n is_class = self.is_class\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"entity\": entity,\n }\n )\n if entity_type is not UNSET:\n field_dict[\"entity_type\"] = entity_type\n if entity_str is not UNSET:\n field_dict[\"entity_str\"] = entity_str\n if is_class is not UNSET:\n field_dict[\"is_class\"] = is_class\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n entity = d.pop(\"entity\")\n\n _entity_type = d.pop(\"entity_type\", UNSET)\n entity_type: Union[Unset, EntityType]\n if isinstance(_entity_type, Unset):\n entity_type = UNSET\n else:\n entity_type = EntityType(_entity_type)\n\n entity_str = d.pop(\"entity_str\", UNSET)\n\n is_class = d.pop(\"is_class\", UNSET)\n\n create_entity = cls(\n entity=entity,\n entity_type=entity_type,\n entity_str=entity_str,\n is_class=is_class,\n )\n\n create_entity.additional_properties = d\n return create_entity\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n 
return key in self.additional_properties" }, { "identifier": "CreateRule", "path": "zerolink_client/models/create_rule.py", "snippet": "class CreateRule:\n \"\"\"\n Attributes:\n rule (str): Textual representation of the rule to parse\n context (Union[Unset, CreateRuleContext]): Context of entities to use for parsing the rule\n \"\"\"\n\n rule: str\n context: Union[Unset, \"CreateRuleContext\"] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.create_rule_context import CreateRuleContext\n\n rule = self.rule\n\n context: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.context, Unset):\n context = self.context.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"rule\": rule,\n }\n )\n if context is not UNSET:\n field_dict[\"context\"] = context\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.create_rule_context import CreateRuleContext\n\n d = src_dict.copy()\n rule = d.pop(\"rule\")\n\n _context = d.pop(\"context\", UNSET)\n context: Union[Unset, CreateRuleContext]\n if isinstance(_context, Unset):\n context = UNSET\n else:\n context = CreateRuleContext.from_dict(_context)\n\n create_rule = cls(\n rule=rule,\n context=context,\n )\n\n create_rule.additional_properties = d\n return create_rule\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateRuleResponse", "path": "zerolink_client/models/create_rule_response.py", "snippet": "class CreateRuleResponse:\n \"\"\"\n Attributes:\n id (str):\n \"\"\"\n\n id: str\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n id = self.id\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n create_rule_response = cls(\n id=id,\n )\n\n create_rule_response.additional_properties = d\n return create_rule_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateTriple", "path": "zerolink_client/models/create_triple.py", "snippet": "class CreateTriple:\n \"\"\"\n Attributes:\n predicate (str): Name of predicate relation\n user_subject (Union[Unset, str]): EID of a user entity\n subject (Union[Unset, str]): EID of a builtin entity\n user_object (Union[Unset, str]): EID of a user entity\n object_ (Union[Unset, str]): EID of a builtin entity\n \"\"\"\n\n predicate: 
str\n user_subject: Union[Unset, str] = UNSET\n subject: Union[Unset, str] = UNSET\n user_object: Union[Unset, str] = UNSET\n object_: Union[Unset, str] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n predicate = self.predicate\n\n user_subject = self.user_subject\n\n subject = self.subject\n\n user_object = self.user_object\n\n object_ = self.object_\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"predicate\": predicate,\n }\n )\n if user_subject is not UNSET:\n field_dict[\"user_subject\"] = user_subject\n if subject is not UNSET:\n field_dict[\"subject\"] = subject\n if user_object is not UNSET:\n field_dict[\"user_object\"] = user_object\n if object_ is not UNSET:\n field_dict[\"object\"] = object_\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n predicate = d.pop(\"predicate\")\n\n user_subject = d.pop(\"user_subject\", UNSET)\n\n subject = d.pop(\"subject\", UNSET)\n\n user_object = d.pop(\"user_object\", UNSET)\n\n object_ = d.pop(\"object\", UNSET)\n\n create_triple = cls(\n predicate=predicate,\n user_subject=user_subject,\n subject=subject,\n user_object=user_object,\n object_=object_,\n )\n\n create_triple.additional_properties = d\n return create_triple\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateTuneJobResponse", "path": "zerolink_client/models/create_tune_job_response.py", "snippet": "class CreateTuneJobResponse:\n \"\"\"\n Attributes:\n id (str):\n status (str):\n \"\"\"\n\n id: str\n status: str\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n id = self.id\n\n status = self.status\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"status\": status,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n status = d.pop(\"status\")\n\n create_tune_job_response = cls(\n id=id,\n status=status,\n )\n\n create_tune_job_response.additional_properties = d\n return create_tune_job_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "Entity", "path": "zerolink_client/models/entity.py", "snippet": "class Entity:\n \"\"\"\n Attributes:\n id (str):\n entity (str):\n description (Union[Unset, str]):\n source (Union[Unset, str]):\n source_url (Union[Unset, str]):\n ontology (Union[Unset, Graph]):\n source_id (Union[Unset, str]):\n \"\"\"\n\n id: str\n 
entity: str\n description: Union[Unset, str] = UNSET\n source: Union[Unset, str] = UNSET\n source_url: Union[Unset, str] = UNSET\n ontology: Union[Unset, \"Graph\"] = UNSET\n source_id: Union[Unset, str] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.graph import Graph\n\n id = self.id\n\n entity = self.entity\n\n description = self.description\n\n source = self.source\n\n source_url = self.source_url\n\n ontology: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.ontology, Unset):\n ontology = self.ontology.to_dict()\n\n source_id = self.source_id\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"entity\": entity,\n }\n )\n if description is not UNSET:\n field_dict[\"description\"] = description\n if source is not UNSET:\n field_dict[\"source\"] = source\n if source_url is not UNSET:\n field_dict[\"source_url\"] = source_url\n if ontology is not UNSET:\n field_dict[\"ontology\"] = ontology\n if source_id is not UNSET:\n field_dict[\"source_id\"] = source_id\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.graph import Graph\n\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n entity = d.pop(\"entity\")\n\n description = d.pop(\"description\", UNSET)\n\n source = d.pop(\"source\", UNSET)\n\n source_url = d.pop(\"source_url\", UNSET)\n\n _ontology = d.pop(\"ontology\", UNSET)\n ontology: Union[Unset, Graph]\n if isinstance(_ontology, Unset):\n ontology = UNSET\n else:\n ontology = Graph.from_dict(_ontology)\n\n source_id = d.pop(\"source_id\", UNSET)\n\n entity = cls(\n id=id,\n entity=entity,\n description=description,\n source=source,\n source_url=source_url,\n ontology=ontology,\n source_id=source_id,\n )\n\n entity.additional_properties = d\n return entity\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "HTTPValidationError", "path": "zerolink_client/models/http_validation_error.py", "snippet": "class HTTPValidationError:\n \"\"\"\n Attributes:\n detail (Union[Unset, List['ValidationError']]):\n \"\"\"\n\n detail: Union[Unset, List[\"ValidationError\"]] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.validation_error import ValidationError\n\n detail: Union[Unset, List[Dict[str, Any]]] = UNSET\n if not isinstance(self.detail, Unset):\n detail = []\n for detail_item_data in self.detail:\n detail_item = detail_item_data.to_dict()\n detail.append(detail_item)\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if detail is not UNSET:\n field_dict[\"detail\"] = detail\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.validation_error import ValidationError\n\n d = src_dict.copy()\n detail = []\n _detail = d.pop(\"detail\", UNSET)\n for detail_item_data in _detail or []:\n detail_item = 
ValidationError.from_dict(detail_item_data)\n\n detail.append(detail_item)\n\n http_validation_error = cls(\n detail=detail,\n )\n\n http_validation_error.additional_properties = d\n return http_validation_error\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "Question", "path": "zerolink_client/models/question.py", "snippet": "class Question:\n \"\"\"A question to be answered by querying the knowledge graph and reasoner.\n\n Attributes:\n body (str): The body of the question\n world (Union[Unset, WorldAssumption]): The world assumption is the assumption about the world that the reasoner\n makes. This is used to determine the answer to a query. For example, if\n the world assumption is \"closed\" then the reasoner will assume that the\n answer to the query is \"no\" if it cannot find a triple to satisfy the\n query. Default: WorldAssumption.CLOSED.\n spatial (Union[Unset, SpatialAssumption]): The spatial assumption is the assumption about space that the\n reasoner\n makes. This is used to determine the answer to a query. For example, if the\n spatial assumption is \"earth\" then the reasoner will only consider\n geographic locations on Earth and will assume all instances of 'location'\n are on Earth. If the spatial assumption is \"universe\" then the reasoner\n then this restriction is lifted and the reasoner will consider all\n locations in the universe. Default: SpatialAssumption.EARTH.\n temporal (Union[Unset, TemporalAssumption]): The temporal assumption is the assumption about time that the\n reasoner\n makes. This is used to determine the answer to a query. For example, if\n the temporal assumption is \"current\" then the reasoner will only consider\n triples that refer to entities that are non-historical. Excluding things\n like the Roman Empire and Francoist Spain. Default: TemporalAssumption.CURRENT.\n context (Union[Unset, ContextAssumption]): The context assumption is the assumption about the context that the\n reasoner makes. This is used to determine the answer to a query. For\n example, if the context assumption is \"none\" then the reasoner will only\n consider basic triples like instance_of and subclass_of. If the context\n assumption is \"local\" then the reasoner will consider triples that are\n defined by the user. If the context assumption is \"global\" then the\n reasoner will consider all queryable triples. 
Default: ContextAssumption.GLOBAL.\n \"\"\"\n\n body: str\n world: Union[Unset, WorldAssumption] = WorldAssumption.CLOSED\n spatial: Union[Unset, SpatialAssumption] = SpatialAssumption.EARTH\n temporal: Union[Unset, TemporalAssumption] = TemporalAssumption.CURRENT\n context: Union[Unset, ContextAssumption] = ContextAssumption.GLOBAL\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n body = self.body\n\n world: Union[Unset, str] = UNSET\n if not isinstance(self.world, Unset):\n world = self.world.value\n\n spatial: Union[Unset, str] = UNSET\n if not isinstance(self.spatial, Unset):\n spatial = self.spatial.value\n\n temporal: Union[Unset, str] = UNSET\n if not isinstance(self.temporal, Unset):\n temporal = self.temporal.value\n\n context: Union[Unset, str] = UNSET\n if not isinstance(self.context, Unset):\n context = self.context.value\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"body\": body,\n }\n )\n if world is not UNSET:\n field_dict[\"world\"] = world\n if spatial is not UNSET:\n field_dict[\"spatial\"] = spatial\n if temporal is not UNSET:\n field_dict[\"temporal\"] = temporal\n if context is not UNSET:\n field_dict[\"context\"] = context\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n body = d.pop(\"body\")\n\n _world = d.pop(\"world\", UNSET)\n world: Union[Unset, WorldAssumption]\n if isinstance(_world, Unset):\n world = UNSET\n else:\n world = WorldAssumption(_world)\n\n _spatial = d.pop(\"spatial\", UNSET)\n spatial: Union[Unset, SpatialAssumption]\n if isinstance(_spatial, Unset):\n spatial = UNSET\n else:\n spatial = SpatialAssumption(_spatial)\n\n _temporal = d.pop(\"temporal\", UNSET)\n temporal: Union[Unset, TemporalAssumption]\n if isinstance(_temporal, Unset):\n temporal = UNSET\n else:\n temporal = TemporalAssumption(_temporal)\n\n _context = d.pop(\"context\", UNSET)\n context: Union[Unset, ContextAssumption]\n if isinstance(_context, Unset):\n context = UNSET\n else:\n context = ContextAssumption(_context)\n\n question = cls(\n body=body,\n world=world,\n spatial=spatial,\n temporal=temporal,\n context=context,\n )\n\n question.additional_properties = d\n return question\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "QuestionResponse", "path": "zerolink_client/models/question_response.py", "snippet": "class QuestionResponse:\n \"\"\"A response to a question request.\n\n Attributes:\n id (int): The ID of the question\n msg (str): A message describing the result of the question\n status (ResultStatus): The status of a result.\n answers (List[str]): The answers to the question\n methods (List[str]): The methods used to answer the question\n reasoners (List[str]): The reasoners used to answer the question\n query (Union[Unset, QuestionResponseQuery]): The query used to answer the question\n \"\"\"\n\n id: int\n msg: str\n status: ResultStatus\n answers: List[str]\n methods: List[str]\n reasoners: List[str]\n query: Union[Unset, 
\"QuestionResponseQuery\"] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.question_response_query import QuestionResponseQuery\n\n id = self.id\n\n msg = self.msg\n\n status = self.status.value\n\n answers = self.answers\n\n methods = self.methods\n\n reasoners = self.reasoners\n\n query: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.query, Unset):\n query = self.query.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"msg\": msg,\n \"status\": status,\n \"answers\": answers,\n \"methods\": methods,\n \"reasoners\": reasoners,\n }\n )\n if query is not UNSET:\n field_dict[\"query\"] = query\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.question_response_query import QuestionResponseQuery\n\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n msg = d.pop(\"msg\")\n\n status = ResultStatus(d.pop(\"status\"))\n\n answers = cast(List[str], d.pop(\"answers\"))\n\n methods = cast(List[str], d.pop(\"methods\"))\n\n reasoners = cast(List[str], d.pop(\"reasoners\"))\n\n _query = d.pop(\"query\", UNSET)\n query: Union[Unset, QuestionResponseQuery]\n if isinstance(_query, Unset):\n query = UNSET\n else:\n query = QuestionResponseQuery.from_dict(_query)\n\n question_response = cls(\n id=id,\n msg=msg,\n status=status,\n answers=answers,\n methods=methods,\n reasoners=reasoners,\n query=query,\n )\n\n question_response.additional_properties = d\n return question_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "TextExtract", "path": "zerolink_client/models/text_extract.py", "snippet": "class TextExtract:\n \"\"\"\n Attributes:\n text (str): Text to extract from\n extraction_model (Union[Unset, ExtractModel]): An enumeration. 
Default: ExtractModel.BASE.\n \"\"\"\n\n text: str\n extraction_model: Union[Unset, ExtractModel] = ExtractModel.BASE\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n text = self.text\n\n extraction_model: Union[Unset, str] = UNSET\n if not isinstance(self.extraction_model, Unset):\n extraction_model = self.extraction_model.value\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"text\": text,\n }\n )\n if extraction_model is not UNSET:\n field_dict[\"extraction_model\"] = extraction_model\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n text = d.pop(\"text\")\n\n _extraction_model = d.pop(\"extraction_model\", UNSET)\n extraction_model: Union[Unset, ExtractModel]\n if isinstance(_extraction_model, Unset):\n extraction_model = UNSET\n else:\n extraction_model = ExtractModel(_extraction_model)\n\n text_extract = cls(\n text=text,\n extraction_model=extraction_model,\n )\n\n text_extract.additional_properties = d\n return text_extract\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "File", "path": "zerolink_client/types.py", "snippet": "class File:\n \"\"\"Contains information for file uploads\"\"\"\n\n payload: BinaryIO\n file_name: Optional[str] = None\n mime_type: Optional[str] = None\n\n def to_tuple(self) -> FileJsonType:\n \"\"\"Return a tuple representation that httpx will accept for multipart/form-data\"\"\"\n return self.file_name, self.payload, self.mime_type" }, { "identifier": "UNSET", "path": "zerolink_client/types.py", "snippet": "UNSET: Unset = Unset()" } ]
from typing import Any, Optional, cast from zerolink import settings from zerolink.exc import APIError, AuthenticationError from zerolink_client import Client from zerolink_client.api.default import finetune, get_models_models_get from zerolink_client.api.entity import ( desc_entity_id, desc_entity_ontology, lookup_entity, lookup_relation, search_entity, ) from zerolink_client.api.extract import extract_text from zerolink_client.api.fact import ( create_userattribute, create_userentity, create_userrule, create_usertriple, ) from zerolink_client.api.kg import get_triple from zerolink_client.api.question import post_question from zerolink_client.api.session import ( create_session, get_session_entities, get_session_facts, get_user_session, ) from zerolink_client.api.user import create_user from zerolink_client.models import ( ChatSession, CreateAttribute, CreateEntity, CreateRule, CreateRuleResponse, CreateTriple, CreateTuneJobResponse, Entity, HTTPValidationError, Question, QuestionResponse, TextExtract, ) from zerolink_client.types import File, UNSET
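Each endpoint module pulled in by the import block above follows the same four-way convention documented in the signature snippets earlier (sync, sync_detailed, asyncio, asyncio_detailed). The following is a minimal sketch of the two synchronous conventions for one of them, get_triple; the base URL and entity name are placeholders rather than values taken from this excerpt:

from zerolink_client import Client
from zerolink_client.api.kg import get_triple

client = Client(base_url="https://example.invalid")  # placeholder server URL

# sync() returns only the parsed body: List["Triple"], an HTTPValidationError, or None
triples = get_triple.sync("Socrates", client=client, limit=5)

# sync_detailed() returns the full Response wrapper, exposing status_code and the parsed payload
detailed = get_triple.sync_detailed("Socrates", client=client, threshold=0.3)
print(detailed.status_code, detailed.parsed)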
16,468
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() else: pass def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep.user_id def post_session(user_id: str, **kwargs) -> Optional[ChatSession]: """ Create a new session. """ check_api_key() if user_id is None: user_id = settings.api_key rep = create_session.sync(client=client, user_id=user_id, **kwargs) if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep def get_session_name(user_id: str, session_name: str, **kwargs): """ Lookup a session by user and name. """ check_api_key() rep = get_user_session.sync_detailed(user_id, session_name, client=client, **kwargs) if rep.status_code == 200: return rep.parsed elif rep.status_code == 404: return None else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_entities_list(session_id: int, **kwargs): """ Get the entities of a session. """ check_api_key() rep = get_session_entities.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_facts_list(session_id: int, **kwargs): """ Get the facts of a session. """ check_api_key() rep = get_session_facts.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def ask_question( session_id: Optional[int], body: str, assumps: Optional[dict[str, Any]] = None, **kwargs, ) -> QuestionResponse: """ Ask a question. """ check_api_key() rep = post_question.sync_detailed( client=client, session_id=(session_id or UNSET), body=Question(body=body, **(assumps or {})), **kwargs, ) if rep.status_code == 200: return cast(QuestionResponse, rep.parsed) else: err = rep.content.decode("utf-8") print(err) raise APIError(err)
# ------------------------------------------------------------------------ # Endpoints # ------------------------------------------------------------------------ client = Client( base_url=settings.server_url, raise_on_unexpected_status=False, ) def check_api_key() -> None: """ Check if the API key is set. """ if settings.api_key is None: raise AuthenticationError() else: pass def get_user_id() -> str: """ Get the user ID from the server. Only used for Demo server. """ client._headers["Authorization"] = settings.api_key rep = create_user.sync(client=client) if rep is None: raise Exception("Failed to authenticate.") settings.api_key = rep.user_id if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep.user_id def post_session(user_id: str, **kwargs) -> Optional[ChatSession]: """ Create a new session. """ check_api_key() if user_id is None: user_id = settings.api_key rep = create_session.sync(client=client, user_id=user_id, **kwargs) if isinstance(rep, HTTPValidationError): raise APIError(str(rep)) return rep def get_session_name(user_id: str, session_name: str, **kwargs): """ Lookup a session by user and name. """ check_api_key() rep = get_user_session.sync_detailed(user_id, session_name, client=client, **kwargs) if rep.status_code == 200: return rep.parsed elif rep.status_code == 404: return None else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_entities_list(session_id: int, **kwargs): """ Get the entities of a session. """ check_api_key() rep = get_session_entities.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def get_session_facts_list(session_id: int, **kwargs): """ Get the facts of a session. """ check_api_key() rep = get_session_facts.sync_detailed(session_id, client=client, **kwargs) if rep.status_code == 200: return rep.parsed else: err = rep.content.decode("utf-8") print(err) raise APIError(err) def ask_question( session_id: Optional[int], body: str, assumps: Optional[dict[str, Any]] = None, **kwargs, ) -> QuestionResponse: """ Ask a question. """ check_api_key() rep = post_question.sync_detailed( client=client, session_id=(session_id or UNSET), body=Question(body=body, **(assumps or {})), **kwargs, ) if rep.status_code == 200: return cast(QuestionResponse, rep.parsed) else: err = rep.content.decode("utf-8") print(err) raise APIError(err)
def get_entity_id(id: str, **kwargs) -> Entity:
30
2023-12-03 07:50:04+00:00
24k
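Taken together, the wrapper functions defined in the code fields above (check_api_key, post_session, ask_question) form a small call flow: set the API key, create a session, then ask a question. The sketch below is illustrative only; the module hosting those wrappers is not shown in this excerpt, so they are assumed to be in scope, and the key, session name, and question text are placeholders:

from zerolink import settings

settings.api_key = "zl-placeholder-key"      # placeholder; a real key is required

check_api_key()                              # raises AuthenticationError if the key is unset
session = post_session(None, name="demo")    # None falls back to settings.api_key as the user id
if session is not None:
    rep = ask_question(session.id, "Is water a liquid at room temperature?")
    print(rep.status, rep.answers)           # ResultStatus value and a list of answer strings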
JunMa11/UHNSeg-Quiz
nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
[ { "identifier": "DC_and_BCE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_BCE_loss(nn.Module):\n def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,\n dice_class=MemoryEfficientSoftDiceLoss):\n \"\"\"\n DO NOT APPLY NONLINEARITY IN YOUR NETWORK!\n\n target mut be one hot encoded\n IMPORTANT: We assume use_ignore_label is located in target[:, -1]!!!\n\n :param soft_dice_kwargs:\n :param bce_kwargs:\n :param aggregate:\n \"\"\"\n super(DC_and_BCE_loss, self).__init__()\n if use_ignore_label:\n bce_kwargs['reduction'] = 'none'\n\n self.weight_dice = weight_dice\n self.weight_ce = weight_ce\n self.use_ignore_label = use_ignore_label\n\n self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)\n self.dc = dice_class(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)\n\n def forward(self, net_output: torch.Tensor, target: torch.Tensor):\n if self.use_ignore_label:\n # target is one hot encoded here. invert it so that it is True wherever we can compute the loss\n mask = (1 - target[:, -1:]).bool()\n # remove ignore channel now that we have the mask\n target_regions = torch.clone(target[:, :-1])\n else:\n target_regions = target\n mask = None\n\n dc_loss = self.dc(net_output, target_regions, loss_mask=mask)\n if mask is not None:\n ce_loss = (self.ce(net_output, target_regions) * mask).sum() / torch.clip(mask.sum(), min=1e-8)\n else:\n ce_loss = self.ce(net_output, target_regions)\n result = self.weight_ce * ce_loss + self.weight_dice * dc_loss\n return result" }, { "identifier": "DC_and_CE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_CE_loss(nn.Module):\n def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None,\n dice_class=SoftDiceLoss):\n \"\"\"\n Weights for CE and Dice do not need to sum to one. You can set whatever you want.\n :param soft_dice_kwargs:\n :param ce_kwargs:\n :param aggregate:\n :param square_dice:\n :param weight_ce:\n :param weight_dice:\n \"\"\"\n super(DC_and_CE_loss, self).__init__()\n if ignore_label is not None:\n ce_kwargs['ignore_index'] = ignore_label\n\n self.weight_dice = weight_dice\n self.weight_ce = weight_ce\n self.ignore_label = ignore_label\n\n self.ce = RobustCrossEntropyLoss(**ce_kwargs)\n self.dc = dice_class(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs)\n\n def forward(self, net_output: torch.Tensor, target: torch.Tensor):\n \"\"\"\n target must be b, c, x, y(, z) with c=1\n :param net_output:\n :param target:\n :return:\n \"\"\"\n if self.ignore_label is not None:\n assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \\\n '(DC_and_CE_loss)'\n mask = (target != self.ignore_label).bool()\n # remove ignore label from target, replace with one of the known labels. 
It doesn't matter because we\n # ignore gradients in those areas anyway\n target_dice = torch.clone(target)\n target_dice[target == self.ignore_label] = 0\n num_fg = mask.sum()\n else:\n target_dice = target\n mask = None\n\n dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \\\n if self.weight_dice != 0 else 0\n ce_loss = self.ce(net_output, target[:, 0].long()) \\\n if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0\n\n result = self.weight_ce * ce_loss + self.weight_dice * dc_loss\n return result" }, { "identifier": "get_tp_fp_fn_tn", "path": "nnunetv2/training/loss/dice.py", "snippet": "def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):\n \"\"\"\n net_output must be (b, c, x, y(, z)))\n gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))\n if mask is provided it must have shape (b, 1, x, y(, z)))\n :param net_output:\n :param gt:\n :param axes: can be (, ) = no summation\n :param mask: mask must be 1 for valid pixels and 0 for invalid pixels\n :param square: if True then fp, tp and fn will be squared before summation\n :return:\n \"\"\"\n if axes is None:\n axes = tuple(range(2, len(net_output.size())))\n\n shp_x = net_output.shape\n shp_y = gt.shape\n\n with torch.no_grad():\n if len(shp_x) != len(shp_y):\n gt = gt.view((shp_y[0], 1, *shp_y[1:]))\n\n if net_output.shape == gt.shape:\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = gt\n else:\n gt = gt.long()\n y_onehot = torch.zeros(shp_x, device=net_output.device)\n y_onehot.scatter_(1, gt, 1)\n\n tp = net_output * y_onehot\n fp = net_output * (1 - y_onehot)\n fn = (1 - net_output) * y_onehot\n tn = (1 - net_output) * (1 - y_onehot)\n\n if mask is not None:\n with torch.no_grad():\n mask_here = torch.tile(mask, (1, tp.shape[1], *[1 for i in range(2, len(tp.shape))]))\n tp *= mask_here\n fp *= mask_here\n fn *= mask_here\n tn *= mask_here\n # benchmark whether tiling the mask would be faster (torch.tile). 
It probably is for large batch sizes\n # OK it barely makes a difference but the implementation above is a tiny bit faster + uses less vram\n # (using nnUNetv2_train 998 3d_fullres 0)\n # tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)\n # fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)\n # fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)\n # tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)\n\n if square:\n tp = tp ** 2\n fp = fp ** 2\n fn = fn ** 2\n tn = tn ** 2\n\n if len(axes) > 0:\n tp = tp.sum(dim=axes, keepdim=False)\n fp = fp.sum(dim=axes, keepdim=False)\n fn = fn.sum(dim=axes, keepdim=False)\n tn = tn.sum(dim=axes, keepdim=False)\n\n return tp, fp, fn, tn" }, { "identifier": "MemoryEfficientSoftDiceLoss", "path": "nnunetv2/training/loss/dice.py", "snippet": "class MemoryEfficientSoftDiceLoss(nn.Module):\n def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1.,\n ddp: bool = True):\n \"\"\"\n saves 1.6 GB on Dataset017 3d_lowres\n \"\"\"\n super(MemoryEfficientSoftDiceLoss, self).__init__()\n\n self.do_bg = do_bg\n self.batch_dice = batch_dice\n self.apply_nonlin = apply_nonlin\n self.smooth = smooth\n self.ddp = ddp\n\n def forward(self, x, y, loss_mask=None):\n if self.apply_nonlin is not None:\n x = self.apply_nonlin(x)\n\n # make everything shape (b, c)\n axes = list(range(2, len(x.shape)))\n with torch.no_grad():\n if len(x.shape) != len(y.shape):\n y = y.view((y.shape[0], 1, *y.shape[1:]))\n\n if x.shape == y.shape:\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = y\n else:\n gt = y.long()\n y_onehot = torch.zeros(x.shape, device=x.device, dtype=torch.bool)\n y_onehot.scatter_(1, gt, 1)\n\n if not self.do_bg:\n y_onehot = y_onehot[:, 1:]\n\n sum_gt = y_onehot.sum(axes) if loss_mask is None else (y_onehot * loss_mask).sum(axes)\n\n # this one MUST be outside the with torch.no_grad(): context. Otherwise no gradients for you\n if not self.do_bg:\n x = x[:, 1:]\n\n intersect = (x * y_onehot).sum(axes) if loss_mask is None else (x * y_onehot * loss_mask).sum(axes)\n sum_pred = x.sum(axes) if loss_mask is None else (x * loss_mask).sum(axes)\n\n if self.ddp and self.batch_dice:\n intersect = AllGatherGrad.apply(intersect).sum(0)\n sum_pred = AllGatherGrad.apply(sum_pred).sum(0)\n sum_gt = AllGatherGrad.apply(sum_gt).sum(0)\n\n if self.batch_dice:\n intersect = intersect.sum(0)\n sum_pred = sum_pred.sum(0)\n sum_gt = sum_gt.sum(0)\n\n dc = (2 * intersect + self.smooth) / (torch.clip(sum_gt + sum_pred + self.smooth, 1e-8))\n\n dc = dc.mean()\n return -dc" }, { "identifier": "nnUNetTrainer", "path": "nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py", "snippet": "class nnUNetTrainer(object):\n def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,\n device: torch.device = torch.device('cuda')):\n # From https://grugbrain.dev/. 
Worth a read ya big brains ;-)\n\n # apex predator of grug is complexity\n # complexity bad\n # say again:\n # complexity very bad\n # you say now:\n # complexity very, very bad\n # given choice between complexity or one on one against t-rex, grug take t-rex: at least grug see t-rex\n # complexity is spirit demon that enter codebase through well-meaning but ultimately very clubbable non grug-brain developers and project managers who not fear complexity spirit demon or even know about sometime\n # one day code base understandable and grug can get work done, everything good!\n # next day impossible: complexity demon spirit has entered code and very dangerous situation!\n\n # OK OK I am guilty. But I tried.\n # https://www.osnews.com/images/comics/wtfm.jpg\n # https://i.pinimg.com/originals/26/b2/50/26b250a738ea4abc7a5af4d42ad93af0.jpg\n\n self.is_ddp = dist.is_available() and dist.is_initialized()\n self.local_rank = 0 if not self.is_ddp else dist.get_rank()\n\n self.device = device\n\n # print what device we are using\n if self.is_ddp: # implicitly it's clear that we use cuda in this case\n print(f\"I am local rank {self.local_rank}. {device_count()} GPUs are available. The world size is \"\n f\"{dist.get_world_size()}.\"\n f\"Setting device to {self.device}\")\n self.device = torch.device(type='cuda', index=self.local_rank)\n else:\n if self.device.type == 'cuda':\n # we might want to let the user pick this but for now please pick the correct GPU with CUDA_VISIBLE_DEVICES=X\n self.device = torch.device(type='cuda', index=0)\n print(f\"Using device: {self.device}\")\n\n # loading and saving this class for continuing from checkpoint should not happen based on pickling. This\n # would also pickle the network etc. Bad, bad. Instead we just reinstantiate and then load the checkpoint we\n # need. So let's save the init args\n self.my_init_kwargs = {}\n for k in inspect.signature(self.__init__).parameters.keys():\n self.my_init_kwargs[k] = locals()[k]\n\n ### Saving all the init args into class variables for later access\n self.plans_manager = PlansManager(plans)\n self.configuration_manager = self.plans_manager.get_configuration(configuration)\n self.configuration_name = configuration\n self.dataset_json = dataset_json\n self.fold = fold\n self.unpack_dataset = unpack_dataset\n\n ### Setting all the folder names. We need to make sure things don't crash in case we are just running\n # inference and some of the folders may not be defined!\n self.preprocessed_dataset_folder_base = join(nnUNet_preprocessed, self.plans_manager.dataset_name) \\\n if nnUNet_preprocessed is not None else None\n self.output_folder_base = join(nnUNet_results, self.plans_manager.dataset_name,\n self.__class__.__name__ + '__' + self.plans_manager.plans_name + \"__\" + configuration) \\\n if nnUNet_results is not None else None\n self.output_folder = join(self.output_folder_base, f'fold_{fold}')\n\n self.preprocessed_dataset_folder = join(self.preprocessed_dataset_folder_base,\n self.configuration_manager.data_identifier)\n # unlike the previous nnunet folder_with_segs_from_previous_stage is now part of the plans. For now it has to\n # be a different configuration in the same plans\n # IMPORTANT! the mapping must be bijective, so lowres must point to fullres and vice versa (using\n # \"previous_stage\" and \"next_stage\"). 
Otherwise it won't work!\n self.is_cascaded = self.configuration_manager.previous_stage_name is not None\n self.folder_with_segs_from_previous_stage = \\\n join(nnUNet_results, self.plans_manager.dataset_name,\n self.__class__.__name__ + '__' + self.plans_manager.plans_name + \"__\" +\n self.configuration_manager.previous_stage_name, 'predicted_next_stage', self.configuration_name) \\\n if self.is_cascaded else None\n\n ### Some hyperparameters for you to fiddle with\n self.initial_lr = 1e-2\n self.weight_decay = 3e-5\n self.oversample_foreground_percent = 0.33\n self.num_iterations_per_epoch = 250\n self.num_val_iterations_per_epoch = 50\n self.num_epochs = 1000\n self.current_epoch = 0\n\n ### Dealing with labels/regions\n self.label_manager = self.plans_manager.get_label_manager(dataset_json)\n # labels can either be a list of int (regular training) or a list of tuples of int (region-based training)\n # needed for predictions. We do sigmoid in case of (overlapping) regions\n\n self.num_input_channels = None # -> self.initialize()\n self.network = None # -> self._get_network()\n self.optimizer = self.lr_scheduler = None # -> self.initialize\n self.grad_scaler = GradScaler() if self.device.type == 'cuda' else None\n self.loss = None # -> self.initialize\n\n ### Simple logging. Don't take that away from me!\n # initialize log file. This is just our log for the print statements etc. Not to be confused with lightning\n # logging\n timestamp = datetime.now()\n maybe_mkdir_p(self.output_folder)\n self.log_file = join(self.output_folder, \"training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt\" %\n (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute,\n timestamp.second))\n self.logger = nnUNetLogger()\n\n ### placeholders\n self.dataloader_train = self.dataloader_val = None # see on_train_start\n\n ### initializing stuff for remembering things and such\n self._best_ema = None\n\n ### inference things\n self.inference_allowed_mirroring_axes = None # this variable is set in\n # self.configure_rotation_dummyDA_mirroring_and_inital_patch_size and will be saved in checkpoints\n\n ### checkpoint saving stuff\n self.save_every = 50\n self.disable_checkpointing = False\n\n ## DDP batch size and oversampling can differ between workers and needs adaptation\n # we need to change the batch size in DDP because we don't use any of those distributed samplers\n self._set_batch_size_and_oversample()\n\n self.was_initialized = False\n\n self.print_to_log_file(\"\\n#######################################################################\\n\"\n \"Please cite the following paper when using nnU-Net:\\n\"\n \"Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). \"\n \"nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. 
\"\n \"Nature methods, 18(2), 203-211.\\n\"\n \"#######################################################################\\n\",\n also_print_to_console=True, add_timestamp=False)\n\n def initialize(self):\n if not self.was_initialized:\n self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager,\n self.dataset_json)\n\n self.network = self.build_network_architecture(self.plans_manager, self.dataset_json,\n self.configuration_manager,\n self.num_input_channels,\n enable_deep_supervision=True).to(self.device)\n # compile network for free speedup\n if self._do_i_compile():\n self.print_to_log_file('Compiling network...')\n self.network = torch.compile(self.network)\n\n self.optimizer, self.lr_scheduler = self.configure_optimizers()\n # if ddp, wrap in DDP wrapper\n if self.is_ddp:\n self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network)\n self.network = DDP(self.network, device_ids=[self.local_rank])\n\n self.loss = self._build_loss()\n self.was_initialized = True\n else:\n raise RuntimeError(\"You have called self.initialize even though the trainer was already initialized. \"\n \"That should not happen.\")\n\n def _do_i_compile(self):\n return ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't'))\n\n def _save_debug_information(self):\n # saving some debug information\n if self.local_rank == 0:\n dct = {}\n for k in self.__dir__():\n if not k.startswith(\"__\"):\n if not callable(getattr(self, k)) or k in ['loss', ]:\n dct[k] = str(getattr(self, k))\n elif k in ['network', ]:\n dct[k] = str(getattr(self, k).__class__.__name__)\n else:\n # print(k)\n pass\n if k in ['dataloader_train', 'dataloader_val']:\n if hasattr(getattr(self, k), 'generator'):\n dct[k + '.generator'] = str(getattr(self, k).generator)\n if hasattr(getattr(self, k), 'num_processes'):\n dct[k + '.num_processes'] = str(getattr(self, k).num_processes)\n if hasattr(getattr(self, k), 'transform'):\n dct[k + '.transform'] = str(getattr(self, k).transform)\n import subprocess\n hostname = subprocess.getoutput(['hostname'])\n dct['hostname'] = hostname\n torch_version = torch.__version__\n if self.device.type == 'cuda':\n gpu_name = torch.cuda.get_device_name()\n dct['gpu_name'] = gpu_name\n cudnn_version = torch.backends.cudnn.version()\n else:\n cudnn_version = 'None'\n dct['device'] = str(self.device)\n dct['torch_version'] = torch_version\n dct['cudnn_version'] = cudnn_version\n save_json(dct, join(self.output_folder, \"debug.json\"))\n\n @staticmethod\n def build_network_architecture(plans_manager: PlansManager,\n dataset_json,\n configuration_manager: ConfigurationManager,\n num_input_channels,\n enable_deep_supervision: bool = True) -> nn.Module:\n \"\"\"\n his is where you build the architecture according to the plans. There is no obligation to use\n get_network_from_plans, this is just a utility we use for the nnU-Net default architectures. You can do what\n you want. Even ignore the plans and just return something static (as long as it can process the requested\n patch size)\n but don't bug us with your bugs arising from fiddling with this :-P\n This is the function that is called in inference as well! 
This is needed so that all network architecture\n variants can be loaded at inference time (inference will use the same nnUNetTrainer that was used for\n training, so if you change the network architecture during training by deriving a new trainer class then\n inference will know about it).\n\n If you need to know how many segmentation outputs your custom architecture needs to have, use the following snippet:\n > label_manager = plans_manager.get_label_manager(dataset_json)\n > label_manager.num_segmentation_heads\n (why so complicated? -> We can have either classical training (classes) or regions. If we have regions,\n the number of outputs is != the number of classes. Also there is the ignore label for which no output\n should be generated. label_manager takes care of all that for you.)\n\n \"\"\"\n return get_network_from_plans(plans_manager, dataset_json, configuration_manager,\n num_input_channels, deep_supervision=enable_deep_supervision)\n\n def _get_deep_supervision_scales(self):\n deep_supervision_scales = list(list(i) for i in 1 / np.cumprod(np.vstack(\n self.configuration_manager.pool_op_kernel_sizes), axis=0))[:-1]\n return deep_supervision_scales\n\n def _set_batch_size_and_oversample(self):\n if not self.is_ddp:\n # set batch size to what the plan says, leave oversample untouched\n self.batch_size = self.configuration_manager.batch_size\n else:\n # batch size is distributed over DDP workers and we need to change oversample_percent for each worker\n batch_sizes = []\n oversample_percents = []\n\n world_size = dist.get_world_size()\n my_rank = dist.get_rank()\n\n global_batch_size = self.configuration_manager.batch_size\n assert global_batch_size >= world_size, 'Cannot run DDP if the batch size is smaller than the number of ' \\\n 'GPUs... 
Duh.'\n\n batch_size_per_GPU = np.ceil(global_batch_size / world_size).astype(int)\n\n for rank in range(world_size):\n if (rank + 1) * batch_size_per_GPU > global_batch_size:\n batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - global_batch_size)\n else:\n batch_size = batch_size_per_GPU\n\n batch_sizes.append(batch_size)\n\n sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1])\n sample_id_high = np.sum(batch_sizes)\n\n if sample_id_high / global_batch_size < (1 - self.oversample_foreground_percent):\n oversample_percents.append(0.0)\n elif sample_id_low / global_batch_size > (1 - self.oversample_foreground_percent):\n oversample_percents.append(1.0)\n else:\n percent_covered_by_this_rank = sample_id_high / global_batch_size - sample_id_low / global_batch_size\n oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) -\n sample_id_low / global_batch_size) / percent_covered_by_this_rank)\n oversample_percents.append(oversample_percent_here)\n\n print(\"worker\", my_rank, \"oversample\", oversample_percents[my_rank])\n print(\"worker\", my_rank, \"batch_size\", batch_sizes[my_rank])\n # self.print_to_log_file(\"worker\", my_rank, \"oversample\", oversample_percents[my_rank])\n # self.print_to_log_file(\"worker\", my_rank, \"batch_size\", batch_sizes[my_rank])\n\n self.batch_size = batch_sizes[my_rank]\n self.oversample_foreground_percent = oversample_percents[my_rank]\n\n def _build_loss(self):\n if self.label_manager.has_regions:\n loss = DC_and_BCE_loss({},\n {'batch_dice': self.configuration_manager.batch_dice,\n 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp},\n use_ignore_label=self.label_manager.ignore_label is not None,\n dice_class=MemoryEfficientSoftDiceLoss)\n else:\n loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice,\n 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1,\n ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss)\n\n deep_supervision_scales = self._get_deep_supervision_scales()\n\n # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases\n # this gives higher resolution outputs more weight in the loss\n weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])\n weights[-1] = 0\n\n # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1\n weights = weights / weights.sum()\n # now wrap the loss\n loss = DeepSupervisionWrapper(loss, weights)\n return loss\n\n def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):\n \"\"\"\n This function is stupid and certainly one of the weakest spots of this implementation. Not entirely sure how we can fix it.\n \"\"\"\n patch_size = self.configuration_manager.patch_size\n dim = len(patch_size)\n # todo rotation should be defined dynamically based on patch size (more isotropic patch sizes = more rotation)\n if dim == 2:\n do_dummy_2d_data_aug = False\n # todo revisit this parametrization\n if max(patch_size) / min(patch_size) > 1.5:\n rotation_for_DA = {\n 'x': (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),\n 'y': (0, 0),\n 'z': (0, 0)\n }\n else:\n rotation_for_DA = {\n 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi),\n 'y': (0, 0),\n 'z': (0, 0)\n }\n mirror_axes = (0, 1)\n elif dim == 3:\n # todo this is not ideal. 
We could also have patch_size (64, 16, 128) in which case a full 180deg 2d rot would be bad\n # order of the axes is determined by spacing, not image size\n do_dummy_2d_data_aug = (max(patch_size) / patch_size[0]) > ANISO_THRESHOLD\n if do_dummy_2d_data_aug:\n # why do we rotate 180 deg here all the time? We should also restrict it\n rotation_for_DA = {\n 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi),\n 'y': (0, 0),\n 'z': (0, 0)\n }\n else:\n rotation_for_DA = {\n 'x': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi),\n 'y': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi),\n 'z': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi),\n }\n mirror_axes = (0, 1, 2)\n else:\n raise RuntimeError()\n\n # todo this function is stupid. It doesn't even use the correct scale range (we keep things as they were in the\n # old nnunet for now)\n initial_patch_size = get_patch_size(patch_size[-dim:],\n *rotation_for_DA.values(),\n (0.85, 1.25))\n if do_dummy_2d_data_aug:\n initial_patch_size[0] = patch_size[0]\n\n self.print_to_log_file(f'do_dummy_2d_data_aug: {do_dummy_2d_data_aug}')\n self.inference_allowed_mirroring_axes = mirror_axes\n\n return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes\n\n def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):\n if self.local_rank == 0:\n timestamp = time()\n dt_object = datetime.fromtimestamp(timestamp)\n\n if add_timestamp:\n args = (f\"{dt_object}:\", *args)\n\n successful = False\n max_attempts = 5\n ctr = 0\n while not successful and ctr < max_attempts:\n try:\n with open(self.log_file, 'a+') as f:\n for a in args:\n f.write(str(a))\n f.write(\" \")\n f.write(\"\\n\")\n successful = True\n except IOError:\n print(f\"{datetime.fromtimestamp(timestamp)}: failed to log: \", sys.exc_info())\n sleep(0.5)\n ctr += 1\n if also_print_to_console:\n print(*args)\n elif also_print_to_console:\n print(*args)\n\n def print_plans(self):\n if self.local_rank == 0:\n dct = deepcopy(self.plans_manager.plans)\n del dct['configurations']\n self.print_to_log_file(f\"\\nThis is the configuration used by this \"\n f\"training:\\nConfiguration name: {self.configuration_name}\\n\",\n self.configuration_manager, '\\n', add_timestamp=False)\n self.print_to_log_file('These are the global plan.json settings:\\n', dct, '\\n', add_timestamp=False)\n\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,\n momentum=0.99, nesterov=True)\n lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs)\n return optimizer, lr_scheduler\n\n def plot_network_architecture(self):\n if self._do_i_compile():\n self.print_to_log_file(\"Unable to plot network architecture: nnUNet_compile is enabled!\")\n return\n\n if self.local_rank == 0:\n try:\n # raise NotImplementedError('hiddenlayer no longer works and we do not have a viable alternative :-(')\n # pip install git+https://github.com/saugatkandel/hiddenlayer.git\n\n # from torchviz import make_dot\n # # not viable.\n # make_dot(tuple(self.network(torch.rand((1, self.num_input_channels,\n # *self.configuration_manager.patch_size),\n # device=self.device)))).render(\n # join(self.output_folder, \"network_architecture.pdf\"), format='pdf')\n # self.optimizer.zero_grad()\n\n # broken.\n\n import hiddenlayer as hl\n g = hl.build_graph(self.network,\n torch.rand((1, self.num_input_channels,\n *self.configuration_manager.patch_size),\n device=self.device),\n transforms=None)\n 
g.save(join(self.output_folder, \"network_architecture.pdf\"))\n del g\n except Exception as e:\n self.print_to_log_file(\"Unable to plot network architecture:\")\n self.print_to_log_file(e)\n\n # self.print_to_log_file(\"\\nprinting the network instead:\\n\")\n # self.print_to_log_file(self.network)\n # self.print_to_log_file(\"\\n\")\n finally:\n empty_cache(self.device)\n\n def do_split(self):\n \"\"\"\n The default split is a 5 fold CV on all available training cases. nnU-Net will create a split (it is seeded,\n so always the same) and save it as splits_final.pkl file in the preprocessed data directory.\n Sometimes you may want to create your own split for various reasons. For this you will need to create your own\n splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in\n it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3)\n and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to\n use a random 80:20 data split.\n :return:\n \"\"\"\n if self.fold == \"all\":\n # if fold==all then we use all images for training and validation\n case_identifiers = get_case_identifiers(self.preprocessed_dataset_folder)\n tr_keys = case_identifiers\n val_keys = tr_keys\n else:\n splits_file = join(self.preprocessed_dataset_folder_base, \"splits_final.json\")\n dataset = nnUNetDataset(self.preprocessed_dataset_folder, case_identifiers=None,\n num_images_properties_loading_threshold=0,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage)\n # if the split file does not exist we need to create it\n if not isfile(splits_file):\n self.print_to_log_file(\"Creating new 5-fold cross-validation split...\")\n splits = []\n all_keys_sorted = np.sort(list(dataset.keys()))\n kfold = KFold(n_splits=5, shuffle=True, random_state=12345)\n for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):\n train_keys = np.array(all_keys_sorted)[train_idx]\n test_keys = np.array(all_keys_sorted)[test_idx]\n splits.append({})\n splits[-1]['train'] = list(train_keys)\n splits[-1]['val'] = list(test_keys)\n save_json(splits, splits_file)\n\n else:\n self.print_to_log_file(\"Using splits from existing split file:\", splits_file)\n splits = load_json(splits_file)\n self.print_to_log_file(f\"The split file contains {len(splits)} splits.\")\n\n self.print_to_log_file(\"Desired fold for training: %d\" % self.fold)\n if self.fold < len(splits):\n tr_keys = splits[self.fold]['train']\n val_keys = splits[self.fold]['val']\n self.print_to_log_file(\"This split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n else:\n self.print_to_log_file(\"INFO: You requested fold %d for training but splits \"\n \"contain only %d folds. 
I am now creating a \"\n \"random (but seeded) 80:20 split!\" % (self.fold, len(splits)))\n # if we request a fold that is not in the split file, create a random 80:20 split\n rnd = np.random.RandomState(seed=12345 + self.fold)\n keys = np.sort(list(dataset.keys()))\n idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)\n idx_val = [i for i in range(len(keys)) if i not in idx_tr]\n tr_keys = [keys[i] for i in idx_tr]\n val_keys = [keys[i] for i in idx_val]\n self.print_to_log_file(\"This random 80:20 split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n if any([i in val_keys for i in tr_keys]):\n self.print_to_log_file('WARNING: Some validation cases are also in the training set. Please check the '\n 'splits.json or ignore if this is intentional.')\n return tr_keys, val_keys\n\n def get_tr_and_val_datasets(self):\n # create dataset split\n tr_keys, val_keys = self.do_split()\n\n # load the datasets for training and validation. Note that we always draw random samples so we really don't\n # care about distributing training cases across GPUs.\n dataset_tr = nnUNetDataset(self.preprocessed_dataset_folder, tr_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n return dataset_tr, dataset_val\n\n def get_dataloaders(self):\n # we use the patch size to determine whether we need 2D or 3D dataloaders. We also use it to determine whether\n # we need to use dummy 2D augmentation (in case of 3D training) and what our initial patch size should be\n patch_size = self.configuration_manager.patch_size\n dim = len(patch_size)\n\n # needed for deep supervision: how much do we need to downscale the segmentation targets for the different\n # outputs?\n deep_supervision_scales = self._get_deep_supervision_scales()\n\n rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \\\n self.configure_rotation_dummyDA_mirroring_and_inital_patch_size()\n\n # training pipeline\n tr_transforms = self.get_training_transforms(\n patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug,\n order_resampling_data=3, order_resampling_seg=1,\n use_mask_for_norm=self.configuration_manager.use_mask_for_norm,\n is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.foreground_labels,\n regions=self.label_manager.foreground_regions if self.label_manager.has_regions else None,\n ignore_label=self.label_manager.ignore_label)\n\n # validation pipeline\n val_transforms = self.get_validation_transforms(deep_supervision_scales,\n is_cascaded=self.is_cascaded,\n foreground_labels=self.label_manager.foreground_labels,\n regions=self.label_manager.foreground_regions if\n self.label_manager.has_regions else None,\n ignore_label=self.label_manager.ignore_label)\n\n dl_tr, dl_val = self.get_plain_dataloaders(initial_patch_size, dim)\n\n allowed_num_processes = get_allowed_n_proc_DA()\n if allowed_num_processes == 0:\n mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms)\n mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms)\n else:\n mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, data_loader=dl_tr, transform=tr_transforms,\n num_processes=allowed_num_processes, num_cached=6, seeds=None,\n pin_memory=self.device.type == 'cuda', 
wait_time=0.02)\n mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, data_loader=dl_val,\n transform=val_transforms, num_processes=max(1, allowed_num_processes // 2),\n num_cached=3, seeds=None, pin_memory=self.device.type == 'cuda',\n wait_time=0.02)\n return mt_gen_train, mt_gen_val\n\n def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], dim: int):\n dataset_tr, dataset_val = self.get_tr_and_val_datasets()\n\n if dim == 2:\n dl_tr = nnUNetDataLoader2D(dataset_tr, self.batch_size,\n initial_patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n dl_val = nnUNetDataLoader2D(dataset_val, self.batch_size,\n self.configuration_manager.patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n else:\n dl_tr = nnUNetDataLoader3D(dataset_tr, self.batch_size,\n initial_patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n dl_val = nnUNetDataLoader3D(dataset_val, self.batch_size,\n self.configuration_manager.patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n return dl_tr, dl_val\n\n @staticmethod\n def get_training_transforms(patch_size: Union[np.ndarray, Tuple[int]],\n rotation_for_DA: dict,\n deep_supervision_scales: Union[List, Tuple],\n mirror_axes: Tuple[int, ...],\n do_dummy_2d_data_aug: bool,\n order_resampling_data: int = 3,\n order_resampling_seg: int = 1,\n border_val_seg: int = -1,\n use_mask_for_norm: List[bool] = None,\n is_cascaded: bool = False,\n foreground_labels: Union[Tuple[int, ...], List[int]] = None,\n regions: List[Union[List[int], Tuple[int, ...], int]] = None,\n ignore_label: int = None) -> AbstractTransform:\n tr_transforms = []\n if do_dummy_2d_data_aug:\n ignore_axes = (0,)\n tr_transforms.append(Convert3DTo2DTransform())\n patch_size_spatial = patch_size[1:]\n else:\n patch_size_spatial = patch_size\n ignore_axes = None\n\n tr_transforms.append(SpatialTransform(\n patch_size_spatial, patch_center_dist_from_border=None,\n do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),\n do_rotation=True, angle_x=rotation_for_DA['x'], angle_y=rotation_for_DA['y'], angle_z=rotation_for_DA['z'],\n p_rot_per_axis=1, # todo experiment with this\n do_scale=True, scale=(0.7, 1.4),\n border_mode_data=\"constant\", border_cval_data=0, order_data=order_resampling_data,\n border_mode_seg=\"constant\", border_cval_seg=border_val_seg, order_seg=order_resampling_seg,\n random_crop=False, # random cropping is part of our dataloaders\n p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,\n independent_scale_for_each_axis=False # todo experiment with this\n ))\n\n if do_dummy_2d_data_aug:\n tr_transforms.append(Convert2DTo3DTransform())\n\n tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))\n tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,\n p_per_channel=0.5))\n tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))\n tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))\n 
tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,\n p_per_channel=0.5,\n order_downsample=0, order_upsample=3, p_per_sample=0.25,\n ignore_axes=ignore_axes))\n tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))\n tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))\n\n if mirror_axes is not None and len(mirror_axes) > 0:\n tr_transforms.append(MirrorTransform(mirror_axes))\n\n if use_mask_for_norm is not None and any(use_mask_for_norm):\n tr_transforms.append(MaskTransform([i for i in range(len(use_mask_for_norm)) if use_mask_for_norm[i]],\n mask_idx_in_seg=0, set_outside_to=0))\n\n tr_transforms.append(RemoveLabelTransform(-1, 0))\n\n if is_cascaded:\n assert foreground_labels is not None, 'We need foreground_labels for cascade augmentations'\n tr_transforms.append(MoveSegAsOneHotToData(1, foreground_labels, 'seg', 'data'))\n tr_transforms.append(ApplyRandomBinaryOperatorTransform(\n channel_idx=list(range(-len(foreground_labels), 0)),\n p_per_sample=0.4,\n key=\"data\",\n strel_size=(1, 8),\n p_per_label=1))\n tr_transforms.append(\n RemoveRandomConnectedComponentFromOneHotEncodingTransform(\n channel_idx=list(range(-len(foreground_labels), 0)),\n key=\"data\",\n p_per_sample=0.2,\n fill_with_other_class_p=0,\n dont_do_if_covers_more_than_x_percent=0.15))\n\n tr_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n # the ignore label must also be converted\n tr_transforms.append(ConvertSegmentationToRegionsTransform(list(regions) + [ignore_label]\n if ignore_label is not None else regions,\n 'target', 'target'))\n\n if deep_supervision_scales is not None:\n tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n tr_transforms = Compose(tr_transforms)\n return tr_transforms\n\n @staticmethod\n def get_validation_transforms(deep_supervision_scales: Union[List, Tuple],\n is_cascaded: bool = False,\n foreground_labels: Union[Tuple[int, ...], List[int]] = None,\n regions: List[Union[List[int], Tuple[int, ...], int]] = None,\n ignore_label: int = None) -> AbstractTransform:\n val_transforms = []\n val_transforms.append(RemoveLabelTransform(-1, 0))\n\n if is_cascaded:\n val_transforms.append(MoveSegAsOneHotToData(1, foreground_labels, 'seg', 'data'))\n\n val_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n # the ignore label must also be converted\n val_transforms.append(ConvertSegmentationToRegionsTransform(list(regions) + [ignore_label]\n if ignore_label is not None else regions,\n 'target', 'target'))\n\n if deep_supervision_scales is not None:\n val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n\n val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n val_transforms = Compose(val_transforms)\n return val_transforms\n\n def set_deep_supervision_enabled(self, enabled: bool):\n \"\"\"\n This function is specific for the default architecture in nnU-Net. 
If you change the architecture, there are\n chances you need to change this as well!\n \"\"\"\n if self.is_ddp:\n self.network.module.decoder.deep_supervision = enabled\n else:\n self.network.decoder.deep_supervision = enabled\n\n def on_train_start(self):\n if not self.was_initialized:\n self.initialize()\n\n maybe_mkdir_p(self.output_folder)\n\n # make sure deep supervision is on in the network\n self.set_deep_supervision_enabled(True)\n\n self.print_plans()\n empty_cache(self.device)\n\n # maybe unpack\n if self.unpack_dataset and self.local_rank == 0:\n self.print_to_log_file('unpacking dataset...')\n unpack_dataset(self.preprocessed_dataset_folder, unpack_segmentation=True, overwrite_existing=False,\n num_processes=max(1, round(get_allowed_n_proc_DA() // 2)))\n self.print_to_log_file('unpacking done...')\n\n if self.is_ddp:\n dist.barrier()\n\n # dataloaders must be instantiated here because they need access to the training data which may not be present\n # when doing inference\n self.dataloader_train, self.dataloader_val = self.get_dataloaders()\n\n # copy plans and dataset.json so that they can be used for restoring everything we need for inference\n save_json(self.plans_manager.plans, join(self.output_folder_base, 'plans.json'), sort_keys=False)\n save_json(self.dataset_json, join(self.output_folder_base, 'dataset.json'), sort_keys=False)\n\n # we don't really need the fingerprint but its still handy to have it with the others\n shutil.copy(join(self.preprocessed_dataset_folder_base, 'dataset_fingerprint.json'),\n join(self.output_folder_base, 'dataset_fingerprint.json'))\n\n # produces a pdf in output folder\n self.plot_network_architecture()\n\n self._save_debug_information()\n\n # print(f\"batch size: {self.batch_size}\")\n # print(f\"oversample: {self.oversample_foreground_percent}\")\n\n def on_train_end(self):\n # dirty hack because on_epoch_end increments the epoch counter and this is executed afterwards.\n # This will lead to the wrong current epoch to be stored\n self.current_epoch -= 1\n self.save_checkpoint(join(self.output_folder, \"checkpoint_final.pth\"))\n self.current_epoch += 1\n\n # now we can delete latest\n if self.local_rank == 0 and isfile(join(self.output_folder, \"checkpoint_latest.pth\")):\n os.remove(join(self.output_folder, \"checkpoint_latest.pth\"))\n\n # shut down dataloaders\n old_stdout = sys.stdout\n with open(os.devnull, 'w') as f:\n sys.stdout = f\n if self.dataloader_train is not None:\n self.dataloader_train._finish()\n if self.dataloader_val is not None:\n self.dataloader_val._finish()\n sys.stdout = old_stdout\n\n empty_cache(self.device)\n self.print_to_log_file(\"Training done.\")\n\n def on_train_epoch_start(self):\n self.network.train()\n self.lr_scheduler.step(self.current_epoch)\n self.print_to_log_file('')\n self.print_to_log_file(f'Epoch {self.current_epoch}')\n self.print_to_log_file(\n f\"Current learning rate: {np.round(self.optimizer.param_groups[0]['lr'], decimals=5)}\")\n # lrs are the same for all workers so we don't need to gather them in case of DDP training\n self.logger.log('lrs', self.optimizer.param_groups[0]['lr'], self.current_epoch)\n\n def train_step(self, batch: dict) -> dict:\n data = batch['data']\n target = batch['target']\n\n data = data.to(self.device, non_blocking=True)\n if isinstance(target, list):\n target = [i.to(self.device, non_blocking=True) for i in target]\n else:\n target = target.to(self.device, non_blocking=True)\n\n self.optimizer.zero_grad(set_to_none=True)\n # Autocast is a little bitch.\n # If 
the device_type is 'cpu' then it's slow as heck and needs to be disabled.\n # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False)\n # So autocast will only be active if we have a cuda device.\n with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():\n output = self.network(data)\n # del data\n l = self.loss(output, target)\n\n if self.grad_scaler is not None:\n self.grad_scaler.scale(l).backward()\n self.grad_scaler.unscale_(self.optimizer)\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.grad_scaler.step(self.optimizer)\n self.grad_scaler.update()\n else:\n l.backward()\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.optimizer.step()\n return {'loss': l.detach().cpu().numpy()}\n\n def on_train_epoch_end(self, train_outputs: List[dict]):\n outputs = collate_outputs(train_outputs)\n\n if self.is_ddp:\n losses_tr = [None for _ in range(dist.get_world_size())]\n dist.all_gather_object(losses_tr, outputs['loss'])\n loss_here = np.vstack(losses_tr).mean()\n else:\n loss_here = np.mean(outputs['loss'])\n\n self.logger.log('train_losses', loss_here, self.current_epoch)\n\n def on_validation_epoch_start(self):\n self.network.eval()\n\n def validation_step(self, batch: dict) -> dict:\n data = batch['data']\n target = batch['target']\n\n data = data.to(self.device, non_blocking=True)\n if isinstance(target, list):\n target = [i.to(self.device, non_blocking=True) for i in target]\n else:\n target = target.to(self.device, non_blocking=True)\n\n # Autocast is a little bitch.\n # If the device_type is 'cpu' then it's slow as heck and needs to be disabled.\n # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False)\n # So autocast will only be active if we have a cuda device.\n with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():\n output = self.network(data)\n del data\n l = self.loss(output, target)\n\n # we only need the output with the highest output resolution\n output = output[0]\n target = target[0]\n\n # the following is needed for online evaluation. Fake dice (green line)\n axes = [0] + list(range(2, output.ndim))\n\n if self.label_manager.has_regions:\n predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long()\n else:\n # no need for softmax\n output_seg = output.argmax(1)[:, None]\n predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32)\n predicted_segmentation_onehot.scatter_(1, output_seg, 1)\n del output_seg\n\n if self.label_manager.has_ignore_label:\n if not self.label_manager.has_regions:\n mask = (target != self.label_manager.ignore_label).float()\n # CAREFUL that you don't rely on target after this line!\n target[target == self.label_manager.ignore_label] = 0\n else:\n mask = 1 - target[:, -1:]\n # CAREFUL that you don't rely on target after this line!\n target = target[:, :-1]\n else:\n mask = None\n\n tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask)\n\n tp_hard = tp.detach().cpu().numpy()\n fp_hard = fp.detach().cpu().numpy()\n fn_hard = fn.detach().cpu().numpy()\n if not self.label_manager.has_regions:\n # if we train with regions all segmentation heads predict some kind of foreground. 
In conventional\n # (softmax training) there needs tobe one output for the background. We are not interested in the\n # background Dice\n # [1:] in order to remove background\n tp_hard = tp_hard[1:]\n fp_hard = fp_hard[1:]\n fn_hard = fn_hard[1:]\n\n return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard}\n\n def on_validation_epoch_end(self, val_outputs: List[dict]):\n outputs_collated = collate_outputs(val_outputs)\n tp = np.sum(outputs_collated['tp_hard'], 0)\n fp = np.sum(outputs_collated['fp_hard'], 0)\n fn = np.sum(outputs_collated['fn_hard'], 0)\n\n if self.is_ddp:\n world_size = dist.get_world_size()\n\n tps = [None for _ in range(world_size)]\n dist.all_gather_object(tps, tp)\n tp = np.vstack([i[None] for i in tps]).sum(0)\n\n fps = [None for _ in range(world_size)]\n dist.all_gather_object(fps, fp)\n fp = np.vstack([i[None] for i in fps]).sum(0)\n\n fns = [None for _ in range(world_size)]\n dist.all_gather_object(fns, fn)\n fn = np.vstack([i[None] for i in fns]).sum(0)\n\n losses_val = [None for _ in range(world_size)]\n dist.all_gather_object(losses_val, outputs_collated['loss'])\n loss_here = np.vstack(losses_val).mean()\n else:\n loss_here = np.mean(outputs_collated['loss'])\n\n global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(tp, fp, fn)]]\n mean_fg_dice = np.nanmean(global_dc_per_class)\n self.logger.log('mean_fg_dice', mean_fg_dice, self.current_epoch)\n self.logger.log('dice_per_class_or_region', global_dc_per_class, self.current_epoch)\n self.logger.log('val_losses', loss_here, self.current_epoch)\n\n def on_epoch_start(self):\n self.logger.log('epoch_start_timestamps', time(), self.current_epoch)\n\n def on_epoch_end(self):\n self.logger.log('epoch_end_timestamps', time(), self.current_epoch)\n\n # todo find a solution for this stupid shit\n self.print_to_log_file('train_loss', np.round(self.logger.my_fantastic_logging['train_losses'][-1], decimals=4))\n self.print_to_log_file('val_loss', np.round(self.logger.my_fantastic_logging['val_losses'][-1], decimals=4))\n self.print_to_log_file('Pseudo dice', [np.round(i, decimals=4) for i in\n self.logger.my_fantastic_logging['dice_per_class_or_region'][-1]])\n self.print_to_log_file(\n f\"Epoch time: {np.round(self.logger.my_fantastic_logging['epoch_end_timestamps'][-1] - self.logger.my_fantastic_logging['epoch_start_timestamps'][-1], decimals=2)} s\")\n\n # handling periodic checkpointing\n current_epoch = self.current_epoch\n if (current_epoch + 1) % self.save_every == 0 and current_epoch != (self.num_epochs - 1):\n self.save_checkpoint(join(self.output_folder, 'checkpoint_latest.pth'))\n\n # handle 'best' checkpointing. ema_fg_dice is computed by the logger and can be accessed like this\n if self._best_ema is None or self.logger.my_fantastic_logging['ema_fg_dice'][-1] > self._best_ema:\n self._best_ema = self.logger.my_fantastic_logging['ema_fg_dice'][-1]\n self.print_to_log_file(f\"Yayy! 
New best EMA pseudo Dice: {np.round(self._best_ema, decimals=4)}\")\n self.save_checkpoint(join(self.output_folder, 'checkpoint_best.pth'))\n\n if self.local_rank == 0:\n self.logger.plot_progress_png(self.output_folder)\n\n self.current_epoch += 1\n\n def save_checkpoint(self, filename: str) -> None:\n if self.local_rank == 0:\n if not self.disable_checkpointing:\n if self.is_ddp:\n mod = self.network.module\n else:\n mod = self.network\n if isinstance(mod, OptimizedModule):\n mod = mod._orig_mod\n\n checkpoint = {\n 'network_weights': mod.state_dict(),\n 'optimizer_state': self.optimizer.state_dict(),\n 'grad_scaler_state': self.grad_scaler.state_dict() if self.grad_scaler is not None else None,\n 'logging': self.logger.get_checkpoint(),\n '_best_ema': self._best_ema,\n 'current_epoch': self.current_epoch + 1,\n 'init_args': self.my_init_kwargs,\n 'trainer_name': self.__class__.__name__,\n 'inference_allowed_mirroring_axes': self.inference_allowed_mirroring_axes,\n }\n torch.save(checkpoint, filename)\n else:\n self.print_to_log_file('No checkpoint written, checkpointing is disabled')\n\n def load_checkpoint(self, filename_or_checkpoint: Union[dict, str]) -> None:\n if not self.was_initialized:\n self.initialize()\n\n if isinstance(filename_or_checkpoint, str):\n checkpoint = torch.load(filename_or_checkpoint, map_location=self.device)\n # if state dict comes from nn.DataParallel but we use non-parallel model here then the state dict keys do not\n # match. Use heuristic to make it match\n new_state_dict = {}\n for k, value in checkpoint['network_weights'].items():\n key = k\n if key not in self.network.state_dict().keys() and key.startswith('module.'):\n key = key[7:]\n new_state_dict[key] = value\n\n self.my_init_kwargs = checkpoint['init_args']\n self.current_epoch = checkpoint['current_epoch']\n self.logger.load_checkpoint(checkpoint['logging'])\n self._best_ema = checkpoint['_best_ema']\n self.inference_allowed_mirroring_axes = checkpoint[\n 'inference_allowed_mirroring_axes'] if 'inference_allowed_mirroring_axes' in checkpoint.keys() else self.inference_allowed_mirroring_axes\n\n # messing with state dict naming schemes. 
Facepalm.\n if self.is_ddp:\n if isinstance(self.network.module, OptimizedModule):\n self.network.module._orig_mod.load_state_dict(new_state_dict)\n else:\n self.network.module.load_state_dict(new_state_dict)\n else:\n if isinstance(self.network, OptimizedModule):\n self.network._orig_mod.load_state_dict(new_state_dict)\n else:\n self.network.load_state_dict(new_state_dict)\n self.optimizer.load_state_dict(checkpoint['optimizer_state'])\n if self.grad_scaler is not None:\n if checkpoint['grad_scaler_state'] is not None:\n self.grad_scaler.load_state_dict(checkpoint['grad_scaler_state'])\n\n def perform_actual_validation(self, save_probabilities: bool = False):\n self.set_deep_supervision_enabled(False)\n self.network.eval()\n\n predictor = nnUNetPredictor(tile_step_size=0.5, use_gaussian=True, use_mirroring=True,\n perform_everything_on_gpu=True, device=self.device, verbose=False,\n verbose_preprocessing=False, allow_tqdm=False)\n predictor.manual_initialization(self.network, self.plans_manager, self.configuration_manager, None,\n self.dataset_json, self.__class__.__name__,\n self.inference_allowed_mirroring_axes)\n\n with multiprocessing.get_context(\"spawn\").Pool(default_num_processes) as segmentation_export_pool:\n worker_list = [i for i in segmentation_export_pool._pool]\n validation_output_folder = join(self.output_folder, 'validation')\n maybe_mkdir_p(validation_output_folder)\n\n # we cannot use self.get_tr_and_val_datasets() here because we might be DDP and then we have to distribute\n # the validation keys across the workers.\n _, val_keys = self.do_split()\n if self.is_ddp:\n val_keys = val_keys[self.local_rank:: dist.get_world_size()]\n\n dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n\n next_stages = self.configuration_manager.next_stage_names\n\n if next_stages is not None:\n _ = [maybe_mkdir_p(join(self.output_folder_base, 'predicted_next_stage', n)) for n in next_stages]\n\n results = []\n\n for k in dataset_val.keys():\n proceed = not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results,\n allowed_num_queued=2)\n while not proceed:\n sleep(0.1)\n proceed = not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results,\n allowed_num_queued=2)\n\n self.print_to_log_file(f\"predicting {k}\")\n data, seg, properties = dataset_val.load_case(k)\n\n if self.is_cascaded:\n data = np.vstack((data, convert_labelmap_to_one_hot(seg[-1], self.label_manager.foreground_labels,\n output_dtype=data.dtype)))\n with warnings.catch_warnings():\n # ignore 'The given NumPy array is not writable' warning\n warnings.simplefilter(\"ignore\")\n data = torch.from_numpy(data)\n\n output_filename_truncated = join(validation_output_folder, k)\n\n try:\n prediction = predictor.predict_sliding_window_return_logits(data)\n except RuntimeError:\n predictor.perform_everything_on_gpu = False\n prediction = predictor.predict_sliding_window_return_logits(data)\n predictor.perform_everything_on_gpu = True\n\n prediction = prediction.cpu()\n\n # this needs to go into background processes\n results.append(\n segmentation_export_pool.starmap_async(\n export_prediction_from_logits, (\n (prediction, properties, self.configuration_manager, self.plans_manager,\n self.dataset_json, output_filename_truncated, save_probabilities),\n )\n )\n )\n # for debug purposes\n # export_prediction(prediction_for_export, properties, 
self.configuration, self.plans, self.dataset_json,\n # output_filename_truncated, save_probabilities)\n\n # if needed, export the softmax prediction for the next stage\n if next_stages is not None:\n for n in next_stages:\n next_stage_config_manager = self.plans_manager.get_configuration(n)\n expected_preprocessed_folder = join(nnUNet_preprocessed, self.plans_manager.dataset_name,\n next_stage_config_manager.data_identifier)\n\n try:\n # we do this so that we can use load_case and do not have to hard code how loading training cases is implemented\n tmp = nnUNetDataset(expected_preprocessed_folder, [k],\n num_images_properties_loading_threshold=0)\n d, s, p = tmp.load_case(k)\n except FileNotFoundError:\n self.print_to_log_file(\n f\"Predicting next stage {n} failed for case {k} because the preprocessed file is missing! \"\n f\"Run the preprocessing for this configuration first!\")\n continue\n\n target_shape = d.shape[1:]\n output_folder = join(self.output_folder_base, 'predicted_next_stage', n)\n output_file = join(output_folder, k + '.npz')\n\n # resample_and_save(prediction, target_shape, output_file, self.plans_manager, self.configuration_manager, properties,\n # self.dataset_json)\n results.append(segmentation_export_pool.starmap_async(\n resample_and_save, (\n (prediction, target_shape, output_file, self.plans_manager,\n self.configuration_manager,\n properties,\n self.dataset_json),\n )\n ))\n\n _ = [r.get() for r in results]\n\n if self.is_ddp:\n dist.barrier()\n\n if self.local_rank == 0:\n metrics = compute_metrics_on_folder(join(self.preprocessed_dataset_folder_base, 'gt_segmentations'),\n validation_output_folder,\n join(validation_output_folder, 'summary.json'),\n self.plans_manager.image_reader_writer_class(),\n self.dataset_json[\"file_ending\"],\n self.label_manager.foreground_regions if self.label_manager.has_regions else\n self.label_manager.foreground_labels,\n self.label_manager.ignore_label, chill=True)\n self.print_to_log_file(\"Validation complete\", also_print_to_console=True)\n self.print_to_log_file(\"Mean Validation Dice: \", (metrics['foreground_mean'][\"Dice\"]), also_print_to_console=True)\n\n self.set_deep_supervision_enabled(True)\n compute_gaussian.cache_clear()\n\n def run_training(self):\n self.on_train_start()\n\n for epoch in range(self.current_epoch, self.num_epochs):\n self.on_epoch_start()\n\n self.on_train_epoch_start()\n train_outputs = []\n for batch_id in range(self.num_iterations_per_epoch):\n train_outputs.append(self.train_step(next(self.dataloader_train)))\n self.on_train_epoch_end(train_outputs)\n\n with torch.no_grad():\n self.on_validation_epoch_start()\n val_outputs = []\n for batch_id in range(self.num_val_iterations_per_epoch):\n val_outputs.append(self.validation_step(next(self.dataloader_val)))\n self.on_validation_epoch_end(val_outputs)\n\n self.on_epoch_end()\n\n self.on_train_end()" }, { "identifier": "dummy_context", "path": "nnunetv2/utilities/helpers.py", "snippet": "class dummy_context(object):\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass" }, { "identifier": "determine_num_input_channels", "path": "nnunetv2/utilities/label_handling/label_handling.py", "snippet": "def determine_num_input_channels(plans_manager: PlansManager,\n configuration_or_config_manager: Union[str, ConfigurationManager],\n dataset_json: dict) -> int:\n if isinstance(configuration_or_config_manager, str):\n config_manager = plans_manager.get_configuration(configuration_or_config_manager)\n else:\n config_manager = 
configuration_or_config_manager\n\n label_manager = plans_manager.get_label_manager(dataset_json)\n num_modalities = len(dataset_json['modality']) if 'modality' in dataset_json.keys() else len(dataset_json['channel_names'])\n\n # cascade has different number of input channels\n if config_manager.previous_stage_name is not None:\n num_label_inputs = len(label_manager.foreground_labels)\n num_input_channels = num_modalities + num_label_inputs\n else:\n num_input_channels = num_modalities\n return num_input_channels" } ]
import torch
from torch import autocast
from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss
from nnunetv2.training.loss.dice import get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss
from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
from nnunetv2.utilities.helpers import dummy_context
from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels
from torch.nn.parallel import DistributedDataParallel as DDP
17114
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer):
    def _build_loss(self):
        if self.label_manager.has_regions:
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer):
    def _build_loss(self):
        if self.label_manager.has_regions:
loss = DC_and_BCE_loss({},
0
2023-12-04 19:43:14+00:00
24k
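Aside: the cropped code in this record ends mid-way through `_build_loss`, and the next line shown above is `loss = DC_and_BCE_loss({},`. Purely as a hypothetical illustration (not the continuation stored in the record), the method can be completed by mirroring the parent `nnUNetTrainer._build_loss` quoted earlier in this record, just without the `DeepSupervisionWrapper`. The class name below carries a `Sketch` suffix to mark it as an assumption; all loss constructor arguments are copied from the parent snippet.

# Hypothetical sketch, assuming the variant simply drops deep-supervision wrapping.
from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss
from nnunetv2.training.loss.dice import MemoryEfficientSoftDiceLoss
from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer


class nnUNetTrainerNoDeepSupervisionSketch(nnUNetTrainer):
    def _build_loss(self):
        # Same compound losses as the parent trainer's _build_loss shown above ...
        if self.label_manager.has_regions:
            loss = DC_and_BCE_loss(
                {},
                {'batch_dice': self.configuration_manager.batch_dice,
                 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp},
                use_ignore_label=self.label_manager.ignore_label is not None,
                dice_class=MemoryEfficientSoftDiceLoss)
        else:
            loss = DC_and_CE_loss(
                {'batch_dice': self.configuration_manager.batch_dice,
                 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp},
                {}, weight_ce=1, weight_dice=1,
                ignore_label=self.label_manager.ignore_label,
                dice_class=MemoryEfficientSoftDiceLoss)
        # ... but without wrapping in DeepSupervisionWrapper, since only a single
        # full-resolution output is produced when deep supervision is disabled.
        return loss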
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/neuralhmm_tts.py
[ { "identifier": "Encoder", "path": "TTS/tts/layers/overflow/common_layers.py", "snippet": "class Encoder(nn.Module):\n r\"\"\"Neural HMM Encoder\n\n Same as Tacotron 2 encoder but increases the input length by states per phone\n\n Args:\n num_chars (int): Number of characters in the input.\n state_per_phone (int): Number of states per phone.\n in_out_channels (int): number of input and output channels.\n n_convolutions (int): number of convolutional layers.\n \"\"\"\n\n def __init__(self, num_chars, state_per_phone, in_out_channels=512, n_convolutions=3):\n super().__init__()\n\n self.state_per_phone = state_per_phone\n self.in_out_channels = in_out_channels\n\n self.emb = nn.Embedding(num_chars, in_out_channels)\n self.convolutions = nn.ModuleList()\n for _ in range(n_convolutions):\n self.convolutions.append(ConvBNBlock(in_out_channels, in_out_channels, 5, \"relu\"))\n self.lstm = nn.LSTM(\n in_out_channels,\n int(in_out_channels / 2) * state_per_phone,\n num_layers=1,\n batch_first=True,\n bias=True,\n bidirectional=True,\n )\n self.rnn_state = None\n\n def forward(self, x: torch.FloatTensor, x_len: torch.LongTensor) -> Tuple[torch.FloatTensor, torch.LongTensor]:\n \"\"\"Forward pass to the encoder.\n\n Args:\n x (torch.FloatTensor): input text indices.\n - shape: :math:`(b, T_{in})`\n x_len (torch.LongTensor): input text lengths.\n - shape: :math:`(b,)`\n\n Returns:\n Tuple[torch.FloatTensor, torch.LongTensor]: encoder outputs and output lengths.\n -shape: :math:`((b, T_{in} * states_per_phone, in_out_channels), (b,))`\n \"\"\"\n b, T = x.shape\n o = self.emb(x).transpose(1, 2)\n for layer in self.convolutions:\n o = layer(o)\n o = o.transpose(1, 2)\n o = nn.utils.rnn.pack_padded_sequence(o, x_len.cpu(), batch_first=True)\n self.lstm.flatten_parameters()\n o, _ = self.lstm(o)\n o, _ = nn.utils.rnn.pad_packed_sequence(o, batch_first=True)\n o = o.reshape(b, T * self.state_per_phone, self.in_out_channels)\n x_len = x_len * self.state_per_phone\n return o, x_len\n\n def inference(self, x, x_len):\n \"\"\"Inference to the encoder.\n\n Args:\n x (torch.FloatTensor): input text indices.\n - shape: :math:`(b, T_{in})`\n x_len (torch.LongTensor): input text lengths.\n - shape: :math:`(b,)`\n\n Returns:\n Tuple[torch.FloatTensor, torch.LongTensor]: encoder outputs and output lengths.\n -shape: :math:`((b, T_{in} * states_per_phone, in_out_channels), (b,))`\n \"\"\"\n b, T = x.shape\n o = self.emb(x).transpose(1, 2)\n for layer in self.convolutions:\n o = layer(o)\n o = o.transpose(1, 2)\n # self.lstm.flatten_parameters()\n o, _ = self.lstm(o)\n o = o.reshape(b, T * self.state_per_phone, self.in_out_channels)\n x_len = x_len * self.state_per_phone\n return o, x_len" }, { "identifier": "OverflowUtils", "path": "TTS/tts/layers/overflow/common_layers.py", "snippet": "class OverflowUtils:\n @staticmethod\n def get_data_parameters_for_flat_start(\n data_loader: torch.utils.data.DataLoader, out_channels: int, states_per_phone: int\n ):\n \"\"\"Generates data parameters for flat starting the HMM.\n\n Args:\n data_loader (torch.utils.data.Dataloader): _description_\n out_channels (int): mel spectrogram channels\n states_per_phone (_type_): HMM states per phone\n \"\"\"\n\n # State related information for transition_p\n total_state_len = 0\n total_mel_len = 0\n\n # Useful for data mean an std\n total_mel_sum = 0\n total_mel_sq_sum = 0\n\n for batch in tqdm(data_loader, leave=False):\n text_lengths = batch[\"token_id_lengths\"]\n mels = batch[\"mel\"]\n mel_lengths = batch[\"mel_lengths\"]\n\n 
total_state_len += torch.sum(text_lengths)\n total_mel_len += torch.sum(mel_lengths)\n total_mel_sum += torch.sum(mels)\n total_mel_sq_sum += torch.sum(torch.pow(mels, 2))\n\n data_mean = total_mel_sum / (total_mel_len * out_channels)\n data_std = torch.sqrt((total_mel_sq_sum / (total_mel_len * out_channels)) - torch.pow(data_mean, 2))\n average_num_states = total_state_len / len(data_loader.dataset)\n average_mel_len = total_mel_len / len(data_loader.dataset)\n average_duration_each_state = average_mel_len / average_num_states\n init_transition_prob = 1 / average_duration_each_state\n\n return data_mean, data_std, (init_transition_prob * states_per_phone)\n\n @staticmethod\n @torch.no_grad()\n def update_flat_start_transition(model, transition_p):\n model.neural_hmm.output_net.parametermodel.flat_start_output_layer(0.0, 1.0, transition_p)\n\n @staticmethod\n def log_clamped(x, eps=1e-04):\n \"\"\"\n Avoids the log(0) problem\n\n Args:\n x (torch.tensor): input tensor\n eps (float, optional): lower bound. Defaults to 1e-04.\n\n Returns:\n torch.tensor: :math:`log(x)`\n \"\"\"\n clamped_x = torch.clamp(x, min=eps)\n return torch.log(clamped_x)\n\n @staticmethod\n def inverse_sigmod(x):\n r\"\"\"\n Inverse of the sigmoid function\n \"\"\"\n if not torch.is_tensor(x):\n x = torch.tensor(x)\n return OverflowUtils.log_clamped(x / (1.0 - x))\n\n @staticmethod\n def inverse_softplus(x):\n r\"\"\"\n Inverse of the softplus function\n \"\"\"\n if not torch.is_tensor(x):\n x = torch.tensor(x)\n return OverflowUtils.log_clamped(torch.exp(x) - 1.0)\n\n @staticmethod\n def logsumexp(x, dim):\n r\"\"\"\n Differentiable LogSumExp: Does not creates nan gradients\n when all the inputs are -inf yeilds 0 gradients.\n Args:\n x : torch.Tensor - The input tensor\n dim: int - The dimension on which the log sum exp has to be applied\n \"\"\"\n\n m, _ = x.max(dim=dim)\n mask = m == -float(\"inf\")\n s = (x - m.masked_fill_(mask, 0).unsqueeze(dim=dim)).exp().sum(dim=dim)\n return s.masked_fill_(mask, 1).log() + m.masked_fill_(mask, -float(\"inf\"))\n\n @staticmethod\n def double_pad(list_of_different_shape_tensors):\n r\"\"\"\n Pads the list of tensors in 2 dimensions\n \"\"\"\n second_dim_lens = [len(a) for a in [i[0] for i in list_of_different_shape_tensors]]\n second_dim_max = max(second_dim_lens)\n padded_x = [F.pad(x, (0, second_dim_max - len(x[0]))) for x in list_of_different_shape_tensors]\n return nn.utils.rnn.pad_sequence(padded_x, batch_first=True)" }, { "identifier": "NeuralHMM", "path": "TTS/tts/layers/overflow/neural_hmm.py", "snippet": "class NeuralHMM(nn.Module):\n \"\"\"Autoregressive left to right HMM model primarily used in \"Neural HMMs are all you need (for high-quality attention-free TTS)\"\n\n Paper::\n https://arxiv.org/abs/2108.13320\n\n Paper abstract::\n Neural sequence-to-sequence TTS has achieved significantly better output quality than statistical speech synthesis using\n HMMs. However, neural TTS is generally not probabilistic and uses non-monotonic attention. Attention failures increase\n training time and can make synthesis babble incoherently. This paper describes how the old and new paradigms can be\n combined to obtain the advantages of both worlds, by replacing attention in neural TTS with an autoregressive left-right\n no-skip hidden Markov model defined by a neural network. Based on this proposal, we modify Tacotron 2 to obtain an\n HMM-based neural TTS model with monotonic alignment, trained to maximise the full sequence likelihood without\n approximation. 
We also describe how to combine ideas from classical and contemporary TTS for best results. The resulting\n example system is smaller and simpler than Tacotron 2, and learns to speak with fewer iterations and less data, whilst\n achieving comparable naturalness prior to the post-net. Our approach also allows easy control over speaking rate.\n\n Args:\n frame_channels (int): Output dimension to generate.\n ar_order (int): Autoregressive order of the model. In ablations of Neural HMM it was found that more autoregression while giving more variation hurts naturalness of the synthesised audio.\n deterministic_transition (bool): deterministic duration generation based on duration quantiles as defiend in \"S. Ronanki, O. Watts, S. King, and G. E. Henter, “Medianbased generation of synthetic speech durations using a nonparametric approach,” in Proc. SLT, 2016.\". Defaults to True.\n encoder_dim (int): Channels of encoder input and character embedding tensors. Defaults to 512.\n prenet_type (str): `original` or `bn`. `original` sets the default Prenet and `bn` uses Batch Normalization version of the Prenet.\n prenet_dim (int): Dimension of the Prenet.\n prenet_n_layers (int): Number of layers in the Prenet.\n prenet_dropout (float): Dropout probability of the Prenet.\n prenet_dropout_at_inference (bool): If True, dropout is applied at inference time.\n memory_rnn_dim (int): Size of the memory RNN to process output of prenet.\n outputnet_size (List[int]): Size of the output network inside the neural HMM.\n flat_start_params (dict): Parameters for the flat start initialization of the neural HMM.\n std_floor (float): Floor value for the standard deviation of the neural HMM. Prevents model cheating by putting point mass and getting infinite likelihood at any datapoint.\n use_grad_checkpointing (bool, optional): Use gradient checkpointing to save memory. 
Defaults to True.\n \"\"\"\n\n def __init__(\n self,\n frame_channels: int,\n ar_order: int,\n deterministic_transition: bool,\n encoder_dim: int,\n prenet_type: str,\n prenet_dim: int,\n prenet_n_layers: int,\n prenet_dropout: float,\n prenet_dropout_at_inference: bool,\n memory_rnn_dim: int,\n outputnet_size: List[int],\n flat_start_params: dict,\n std_floor: float,\n use_grad_checkpointing: bool = True,\n ):\n super().__init__()\n\n self.frame_channels = frame_channels\n self.ar_order = ar_order\n self.deterministic_transition = deterministic_transition\n self.prenet_dim = prenet_dim\n self.memory_rnn_dim = memory_rnn_dim\n self.use_grad_checkpointing = use_grad_checkpointing\n\n self.transition_model = TransitionModel()\n self.emission_model = EmissionModel()\n\n assert ar_order > 0, f\"AR order must be greater than 0 provided {ar_order}\"\n\n self.ar_order = ar_order\n self.prenet = Prenet(\n in_features=frame_channels * ar_order,\n prenet_type=prenet_type,\n prenet_dropout=prenet_dropout,\n dropout_at_inference=prenet_dropout_at_inference,\n out_features=[self.prenet_dim for _ in range(prenet_n_layers)],\n bias=False,\n )\n self.memory_rnn = nn.LSTMCell(input_size=prenet_dim, hidden_size=memory_rnn_dim)\n self.output_net = Outputnet(\n encoder_dim, memory_rnn_dim, frame_channels, outputnet_size, flat_start_params, std_floor\n )\n self.register_buffer(\"go_tokens\", torch.zeros(ar_order, 1))\n\n def forward(self, inputs, inputs_len, mels, mel_lens):\n r\"\"\"HMM forward algorithm for training uses logarithmic version of Rabiner (1989) forward algorithm.\n\n Args:\n inputs (torch.FloatTensor): Encoder outputs\n inputs_len (torch.LongTensor): Encoder output lengths\n mels (torch.FloatTensor): Mel inputs\n mel_lens (torch.LongTensor): Length of mel inputs\n\n Shapes:\n - inputs: (B, T, D_out_enc)\n - inputs_len: (B)\n - mels: (B, D_mel, T_mel)\n - mel_lens: (B)\n\n Returns:\n log_prob (torch.FloatTensor): Log probability of the sequence\n \"\"\"\n # Get dimensions of inputs\n batch_size, N, _ = inputs.shape\n T_max = torch.max(mel_lens)\n mels = mels.permute(0, 2, 1)\n\n # Intialize forward algorithm\n log_state_priors = self._initialize_log_state_priors(inputs)\n log_c, log_alpha_scaled, transition_matrix, means = self._initialize_forward_algorithm_variables(mels, N)\n\n # Initialize autoregression elements\n ar_inputs = self._add_go_token(mels)\n h_memory, c_memory = self._init_lstm_states(batch_size, self.memory_rnn_dim, mels)\n\n for t in range(T_max):\n # Process Autoregression\n h_memory, c_memory = self._process_ar_timestep(t, ar_inputs, h_memory, c_memory)\n # Get mean, std and transition vector from decoder for this timestep\n # Note: Gradient checkpointing currently doesn't works with multiple gpus inside a loop\n if self.use_grad_checkpointing and self.training:\n mean, std, transition_vector = checkpoint(self.output_net, h_memory, inputs)\n else:\n mean, std, transition_vector = self.output_net(h_memory, inputs)\n\n if t == 0:\n log_alpha_temp = log_state_priors + self.emission_model(mels[:, 0], mean, std, inputs_len)\n else:\n log_alpha_temp = self.emission_model(mels[:, t], mean, std, inputs_len) + self.transition_model(\n log_alpha_scaled[:, t - 1, :], transition_vector, inputs_len\n )\n log_c[:, t] = torch.logsumexp(log_alpha_temp, dim=1)\n log_alpha_scaled[:, t, :] = log_alpha_temp - log_c[:, t].unsqueeze(1)\n transition_matrix[:, t] = transition_vector # needed for absorption state calculation\n\n # Save for plotting\n means.append(mean.detach())\n\n log_c, 
log_alpha_scaled = self._mask_lengths(mel_lens, log_c, log_alpha_scaled)\n\n sum_final_log_c = self.get_absorption_state_scaling_factor(\n mel_lens, log_alpha_scaled, inputs_len, transition_matrix\n )\n\n log_probs = torch.sum(log_c, dim=1) + sum_final_log_c\n\n return log_probs, log_alpha_scaled, transition_matrix, means\n\n @staticmethod\n def _mask_lengths(mel_lens, log_c, log_alpha_scaled):\n \"\"\"\n Mask the lengths of the forward variables so that the variable lenghts\n do not contribute in the loss calculation\n Args:\n mel_inputs (torch.FloatTensor): (batch, T, frame_channels)\n mel_inputs_lengths (torch.IntTensor): (batch)\n log_c (torch.FloatTensor): (batch, T)\n Returns:\n log_c (torch.FloatTensor) : scaled probabilities (batch, T)\n log_alpha_scaled (torch.FloatTensor): forward probabilities (batch, T, N)\n \"\"\"\n mask_log_c = sequence_mask(mel_lens)\n log_c = log_c * mask_log_c\n mask_log_alpha_scaled = mask_log_c.unsqueeze(2)\n log_alpha_scaled = log_alpha_scaled * mask_log_alpha_scaled\n return log_c, log_alpha_scaled\n\n def _process_ar_timestep(\n self,\n t,\n ar_inputs,\n h_memory,\n c_memory,\n ):\n \"\"\"\n Process autoregression in timestep\n 1. At a specific t timestep\n 2. Perform data dropout if applied (we did not use it)\n 3. Run the autoregressive frame through the prenet (has dropout)\n 4. Run the prenet output through the post prenet rnn\n\n Args:\n t (int): mel-spec timestep\n ar_inputs (torch.FloatTensor): go-token appended mel-spectrograms\n - shape: (b, D_out, T_out)\n h_post_prenet (torch.FloatTensor): previous timestep rnn hidden state\n - shape: (b, memory_rnn_dim)\n c_post_prenet (torch.FloatTensor): previous timestep rnn cell state\n - shape: (b, memory_rnn_dim)\n\n Returns:\n h_post_prenet (torch.FloatTensor): rnn hidden state of the current timestep\n c_post_prenet (torch.FloatTensor): rnn cell state of the current timestep\n \"\"\"\n prenet_input = ar_inputs[:, t : t + self.ar_order].flatten(1)\n memory_inputs = self.prenet(prenet_input)\n h_memory, c_memory = self.memory_rnn(memory_inputs, (h_memory, c_memory))\n return h_memory, c_memory\n\n def _add_go_token(self, mel_inputs):\n \"\"\"Append the go token to create the autoregressive input\n Args:\n mel_inputs (torch.FloatTensor): (batch_size, T, n_mel_channel)\n Returns:\n ar_inputs (torch.FloatTensor): (batch_size, T, n_mel_channel)\n \"\"\"\n batch_size, T, _ = mel_inputs.shape\n go_tokens = self.go_tokens.unsqueeze(0).expand(batch_size, self.ar_order, self.frame_channels)\n ar_inputs = torch.cat((go_tokens, mel_inputs), dim=1)[:, :T]\n return ar_inputs\n\n @staticmethod\n def _initialize_forward_algorithm_variables(mel_inputs, N):\n r\"\"\"Initialize placeholders for forward algorithm variables, to use a stable\n version we will use log_alpha_scaled and the scaling constant\n\n Args:\n mel_inputs (torch.FloatTensor): (b, T_max, frame_channels)\n N (int): number of states\n Returns:\n log_c (torch.FloatTensor): Scaling constant (b, T_max)\n \"\"\"\n b, T_max, _ = mel_inputs.shape\n log_alpha_scaled = mel_inputs.new_zeros((b, T_max, N))\n log_c = mel_inputs.new_zeros(b, T_max)\n transition_matrix = mel_inputs.new_zeros((b, T_max, N))\n\n # Saving for plotting later, will not have gradient tapes\n means = []\n return log_c, log_alpha_scaled, transition_matrix, means\n\n @staticmethod\n def _init_lstm_states(batch_size, hidden_state_dim, device_tensor):\n r\"\"\"\n Initialize Hidden and Cell states for LSTM Cell\n\n Args:\n batch_size (Int): batch size\n hidden_state_dim (Int): dimensions of 
the h and c\n device_tensor (torch.FloatTensor): useful for the device and type\n\n Returns:\n (torch.FloatTensor): shape (batch_size, hidden_state_dim)\n can be hidden state for LSTM\n (torch.FloatTensor): shape (batch_size, hidden_state_dim)\n can be the cell state for LSTM\n \"\"\"\n return (\n device_tensor.new_zeros(batch_size, hidden_state_dim),\n device_tensor.new_zeros(batch_size, hidden_state_dim),\n )\n\n def get_absorption_state_scaling_factor(self, mels_len, log_alpha_scaled, inputs_len, transition_vector):\n \"\"\"Returns the final scaling factor of absorption state\n\n Args:\n mels_len (torch.IntTensor): Input size of mels to\n get the last timestep of log_alpha_scaled\n log_alpha_scaled (torch.FloatTEnsor): State probabilities\n text_lengths (torch.IntTensor): length of the states to\n mask the values of states lengths\n (\n Useful when the batch has very different lengths,\n when the length of an observation is less than\n the number of max states, then the log alpha after\n the state value is filled with -infs. So we mask\n those values so that it only consider the states\n which are needed for that length\n )\n transition_vector (torch.FloatTensor): transtiion vector for each state per timestep\n\n Shapes:\n - mels_len: (batch_size)\n - log_alpha_scaled: (batch_size, N, T)\n - text_lengths: (batch_size)\n - transition_vector: (batch_size, N, T)\n\n Returns:\n sum_final_log_c (torch.FloatTensor): (batch_size)\n\n \"\"\"\n N = torch.max(inputs_len)\n max_inputs_len = log_alpha_scaled.shape[2]\n state_lengths_mask = sequence_mask(inputs_len, max_len=max_inputs_len)\n\n last_log_alpha_scaled_index = (\n (mels_len - 1).unsqueeze(-1).expand(-1, N).unsqueeze(1)\n ) # Batch X Hidden State Size\n last_log_alpha_scaled = torch.gather(log_alpha_scaled, 1, last_log_alpha_scaled_index).squeeze(1)\n last_log_alpha_scaled = last_log_alpha_scaled.masked_fill(~state_lengths_mask, -float(\"inf\"))\n\n last_transition_vector = torch.gather(transition_vector, 1, last_log_alpha_scaled_index).squeeze(1)\n last_transition_probability = torch.sigmoid(last_transition_vector)\n log_probability_of_transitioning = OverflowUtils.log_clamped(last_transition_probability)\n\n last_transition_probability_index = self.get_mask_for_last_item(inputs_len, inputs_len.device)\n log_probability_of_transitioning = log_probability_of_transitioning.masked_fill(\n ~last_transition_probability_index, -float(\"inf\")\n )\n final_log_c = last_log_alpha_scaled + log_probability_of_transitioning\n\n # If the length of the mel is less than the number of states it will select the -inf values leading to nan gradients\n # Ideally, we should clean the dataset otherwise this is a little hack uncomment the line below\n final_log_c = final_log_c.clamp(min=torch.finfo(final_log_c.dtype).min)\n\n sum_final_log_c = torch.logsumexp(final_log_c, dim=1)\n return sum_final_log_c\n\n @staticmethod\n def get_mask_for_last_item(lengths, device, out_tensor=None):\n \"\"\"Returns n-1 mask for the last item in the sequence.\n\n Args:\n lengths (torch.IntTensor): lengths in a batch\n device (str, optional): Defaults to \"cpu\".\n out_tensor (torch.Tensor, optional): uses the memory of a specific tensor.\n Defaults to None.\n\n Returns:\n - Shape: :math:`(b, max_len)`\n \"\"\"\n max_len = torch.max(lengths).item()\n ids = (\n torch.arange(0, max_len, device=device) if out_tensor is None else torch.arange(0, max_len, out=out_tensor)\n )\n mask = ids == lengths.unsqueeze(1) - 1\n return mask\n\n @torch.inference_mode()\n def inference(\n 
self,\n inputs: torch.FloatTensor,\n input_lens: torch.LongTensor,\n sampling_temp: float,\n max_sampling_time: int,\n duration_threshold: float,\n ):\n \"\"\"Inference from autoregressive neural HMM\n\n Args:\n inputs (torch.FloatTensor): input states\n - shape: :math:`(b, T, d)`\n input_lens (torch.LongTensor): input state lengths\n - shape: :math:`(b)`\n sampling_temp (float): sampling temperature\n max_sampling_temp (int): max sampling temperature\n duration_threshold (float): duration threshold to switch to next state\n - Use this to change the spearking rate of the synthesised audio\n \"\"\"\n\n b = inputs.shape[0]\n outputs = {\n \"hmm_outputs\": [],\n \"hmm_outputs_len\": [],\n \"alignments\": [],\n \"input_parameters\": [],\n \"output_parameters\": [],\n }\n for i in range(b):\n neural_hmm_outputs, states_travelled, input_parameters, output_parameters = self.sample(\n inputs[i : i + 1], input_lens[i], sampling_temp, max_sampling_time, duration_threshold\n )\n\n outputs[\"hmm_outputs\"].append(neural_hmm_outputs)\n outputs[\"hmm_outputs_len\"].append(neural_hmm_outputs.shape[0])\n outputs[\"alignments\"].append(states_travelled)\n outputs[\"input_parameters\"].append(input_parameters)\n outputs[\"output_parameters\"].append(output_parameters)\n\n outputs[\"hmm_outputs\"] = nn.utils.rnn.pad_sequence(outputs[\"hmm_outputs\"], batch_first=True)\n outputs[\"hmm_outputs_len\"] = torch.tensor(\n outputs[\"hmm_outputs_len\"], dtype=input_lens.dtype, device=input_lens.device\n )\n return outputs\n\n @torch.inference_mode()\n def sample(self, inputs, input_lens, sampling_temp, max_sampling_time, duration_threshold):\n \"\"\"Samples an output from the parameter models\n\n Args:\n inputs (torch.FloatTensor): input states\n - shape: :math:`(1, T, d)`\n input_lens (torch.LongTensor): input state lengths\n - shape: :math:`(1)`\n sampling_temp (float): sampling temperature\n max_sampling_time (int): max sampling time\n duration_threshold (float): duration threshold to switch to next state\n\n Returns:\n outputs (torch.FloatTensor): Output Observations\n - Shape: :math:`(T, output_dim)`\n states_travelled (list[int]): Hidden states travelled\n - Shape: :math:`(T)`\n input_parameters (list[torch.FloatTensor]): Input parameters\n output_parameters (list[torch.FloatTensor]): Output parameters\n \"\"\"\n states_travelled, outputs, t = [], [], 0\n\n # Sample initial state\n current_state = 0\n states_travelled.append(current_state)\n\n # Prepare autoregression\n prenet_input = self.go_tokens.unsqueeze(0).expand(1, self.ar_order, self.frame_channels)\n h_memory, c_memory = self._init_lstm_states(1, self.memory_rnn_dim, prenet_input)\n\n input_parameter_values = []\n output_parameter_values = []\n quantile = 1\n while True:\n memory_input = self.prenet(prenet_input.flatten(1).unsqueeze(0))\n # will be 1 while sampling\n h_memory, c_memory = self.memory_rnn(memory_input.squeeze(0), (h_memory, c_memory))\n\n z_t = inputs[:, current_state].unsqueeze(0) # Add fake time dimension\n mean, std, transition_vector = self.output_net(h_memory, z_t)\n\n transition_probability = torch.sigmoid(transition_vector.flatten())\n staying_probability = torch.sigmoid(-transition_vector.flatten())\n\n # Save for plotting\n input_parameter_values.append([prenet_input, current_state])\n output_parameter_values.append([mean, std, transition_probability])\n\n x_t = self.emission_model.sample(mean, std, sampling_temp=sampling_temp)\n\n # Prepare autoregressive input for next iteration\n prenet_input = torch.cat((prenet_input, x_t), 
dim=1)[:, 1:]\n\n outputs.append(x_t.flatten())\n\n transition_matrix = torch.cat((staying_probability, transition_probability))\n quantile *= staying_probability\n if not self.deterministic_transition:\n switch = transition_matrix.multinomial(1)[0].item()\n else:\n switch = quantile < duration_threshold\n\n if switch:\n current_state += 1\n quantile = 1\n\n states_travelled.append(current_state)\n\n if (current_state == input_lens) or (max_sampling_time and t == max_sampling_time - 1):\n break\n\n t += 1\n\n return (\n torch.stack(outputs, dim=0),\n F.one_hot(input_lens.new_tensor(states_travelled)),\n input_parameter_values,\n output_parameter_values,\n )\n\n @staticmethod\n def _initialize_log_state_priors(text_embeddings):\n \"\"\"Creates the log pi in forward algorithm.\n\n Args:\n text_embeddings (torch.FloatTensor): used to create the log pi\n on current device\n\n Shapes:\n - text_embeddings: (B, T, D_out_enc)\n \"\"\"\n N = text_embeddings.shape[1]\n log_state_priors = text_embeddings.new_full([N], -float(\"inf\"))\n log_state_priors[0] = 0.0\n return log_state_priors" }, { "identifier": "get_spec_from_most_probable_state", "path": "TTS/tts/layers/overflow/plotting_utils.py", "snippet": "def get_spec_from_most_probable_state(log_alpha_scaled, means, decoder=None):\n \"\"\"Get the most probable state means from the log_alpha_scaled.\n\n Args:\n log_alpha_scaled (torch.Tensor): Log alpha scaled values.\n - Shape: :math:`(T, N)`\n means (torch.Tensor): Means of the states.\n - Shape: :math:`(N, T, D_out)`\n decoder (torch.nn.Module): Decoder module to decode the latent to melspectrogram. Defaults to None.\n \"\"\"\n max_state_numbers = torch.max(log_alpha_scaled, dim=1)[1]\n max_len = means.shape[0]\n n_mel_channels = means.shape[2]\n max_state_numbers = max_state_numbers.unsqueeze(1).unsqueeze(1).expand(max_len, 1, n_mel_channels)\n means = torch.gather(means, 1, max_state_numbers).squeeze(1).to(log_alpha_scaled.dtype)\n if decoder is not None:\n mel = (\n decoder(means.T.unsqueeze(0), torch.tensor([means.shape[0]], device=means.device), reverse=True)[0]\n .squeeze(0)\n .T\n )\n else:\n mel = means\n return mel" }, { "identifier": "plot_transition_probabilities_to_numpy", "path": "TTS/tts/layers/overflow/plotting_utils.py", "snippet": "def plot_transition_probabilities_to_numpy(states, transition_probabilities, output_fig=False):\n \"\"\"Generates trainsition probabilities plot for the states and the probability of transition.\n\n Args:\n states (torch.IntTensor): the states\n transition_probabilities (torch.FloatTensor): the transition probabilities\n \"\"\"\n states = validate_numpy_array(states)\n transition_probabilities = validate_numpy_array(transition_probabilities)\n\n fig, ax = plt.subplots(figsize=(30, 3))\n ax.plot(transition_probabilities, \"o\")\n ax.set_title(\"Transition probability of state\")\n ax.set_xlabel(\"hidden state\")\n ax.set_ylabel(\"probability\")\n ax.set_xticks([i for i in range(len(transition_probabilities))]) # pylint: disable=unnecessary-comprehension\n ax.set_xticklabels([int(x) for x in states], rotation=90)\n plt.tight_layout()\n if not output_fig:\n plt.close()\n return fig" }, { "identifier": "BaseTTS", "path": "TTS/tts/models/base_tts.py", "snippet": "class BaseTTS(BaseTrainerModel):\n \"\"\"Base `tts` class. 
Every new `tts` model must inherit this.\n\n It defines common `tts` specific functions on top of `Model` implementation.\n \"\"\"\n\n MODEL_TYPE = \"tts\"\n\n def __init__(\n self,\n config: Coqpit,\n ap: \"AudioProcessor\",\n tokenizer: \"TTSTokenizer\",\n speaker_manager: SpeakerManager = None,\n language_manager: LanguageManager = None,\n ):\n super().__init__()\n self.config = config\n self.ap = ap\n self.tokenizer = tokenizer\n self.speaker_manager = speaker_manager\n self.language_manager = language_manager\n self._set_model_args(config)\n\n def _set_model_args(self, config: Coqpit):\n \"\"\"Setup model args based on the config type (`ModelConfig` or `ModelArgs`).\n\n `ModelArgs` has all the fields reuqired to initialize the model architecture.\n\n `ModelConfig` has all the fields required for training, inference and containes `ModelArgs`.\n\n If the config is for training with a name like \"*Config\", then the model args are embeded in the\n config.model_args\n\n If the config is for the model with a name like \"*Args\", then we assign the directly.\n \"\"\"\n # don't use isintance not to import recursively\n if \"Config\" in config.__class__.__name__:\n config_num_chars = (\n self.config.model_args.num_chars if hasattr(self.config, \"model_args\") else self.config.num_chars\n )\n num_chars = config_num_chars if self.tokenizer is None else self.tokenizer.characters.num_chars\n if \"characters\" in config:\n self.config.num_chars = num_chars\n if hasattr(self.config, \"model_args\"):\n config.model_args.num_chars = num_chars\n self.args = self.config.model_args\n else:\n self.config = config\n self.args = config.model_args\n elif \"Args\" in config.__class__.__name__:\n self.args = config\n else:\n raise ValueError(\"config must be either a *Config or *Args\")\n\n def init_multispeaker(self, config: Coqpit, data: List = None):\n \"\"\"Initialize a speaker embedding layer if needen and define expected embedding channel size for defining\n `in_channels` size of the connected layers.\n\n This implementation yields 3 possible outcomes:\n\n 1. If `config.use_speaker_embedding` and `config.use_d_vector_file are False, do nothing.\n 2. If `config.use_d_vector_file` is True, set expected embedding channel size to `config.d_vector_dim` or 512.\n 3. 
If `config.use_speaker_embedding`, initialize a speaker embedding layer with channel size of\n `config.d_vector_dim` or 512.\n\n You can override this function for new models.\n\n Args:\n config (Coqpit): Model configuration.\n \"\"\"\n # set number of speakers\n if self.speaker_manager is not None:\n self.num_speakers = self.speaker_manager.num_speakers\n elif hasattr(config, \"num_speakers\"):\n self.num_speakers = config.num_speakers\n\n # set ultimate speaker embedding size\n if config.use_speaker_embedding or config.use_d_vector_file:\n self.embedded_speaker_dim = (\n config.d_vector_dim if \"d_vector_dim\" in config and config.d_vector_dim is not None else 512\n )\n # init speaker embedding layer\n if config.use_speaker_embedding and not config.use_d_vector_file:\n print(\" > Init speaker_embedding layer.\")\n self.speaker_embedding = nn.Embedding(self.num_speakers, self.embedded_speaker_dim)\n self.speaker_embedding.weight.data.normal_(0, 0.3)\n\n def get_aux_input(self, **kwargs) -> Dict:\n \"\"\"Prepare and return `aux_input` used by `forward()`\"\"\"\n return {\"speaker_id\": None, \"style_wav\": None, \"d_vector\": None, \"language_id\": None}\n\n def get_aux_input_from_test_sentences(self, sentence_info):\n if hasattr(self.config, \"model_args\"):\n config = self.config.model_args\n else:\n config = self.config\n\n # extract speaker and language info\n text, speaker_name, style_wav, language_name = None, None, None, None\n\n if isinstance(sentence_info, list):\n if len(sentence_info) == 1:\n text = sentence_info[0]\n elif len(sentence_info) == 2:\n text, speaker_name = sentence_info\n elif len(sentence_info) == 3:\n text, speaker_name, style_wav = sentence_info\n elif len(sentence_info) == 4:\n text, speaker_name, style_wav, language_name = sentence_info\n else:\n text = sentence_info\n\n # get speaker id/d_vector\n speaker_id, d_vector, language_id = None, None, None\n if self.speaker_manager is not None:\n if config.use_d_vector_file:\n if speaker_name is None:\n d_vector = self.speaker_manager.get_random_embedding()\n else:\n d_vector = self.speaker_manager.get_d_vector_by_name(speaker_name)\n elif config.use_speaker_embedding:\n if speaker_name is None:\n speaker_id = self.speaker_manager.get_random_id()\n else:\n speaker_id = self.speaker_manager.name_to_id[speaker_name]\n\n # get language id\n if self.language_manager is not None and config.use_language_embedding and language_name is not None:\n language_id = self.language_manager.name_to_id[language_name]\n\n return {\n \"text\": text,\n \"speaker_id\": speaker_id,\n \"style_wav\": style_wav,\n \"d_vector\": d_vector,\n \"language_id\": language_id,\n }\n\n def format_batch(self, batch: Dict) -> Dict:\n \"\"\"Generic batch formatting for `TTSDataset`.\n\n You must override this if you use a custom dataset.\n\n Args:\n batch (Dict): [description]\n\n Returns:\n Dict: [description]\n \"\"\"\n # setup input batch\n text_input = batch[\"token_id\"]\n text_lengths = batch[\"token_id_lengths\"]\n speaker_names = batch[\"speaker_names\"]\n linear_input = batch[\"linear\"]\n mel_input = batch[\"mel\"]\n mel_lengths = batch[\"mel_lengths\"]\n stop_targets = batch[\"stop_targets\"]\n item_idx = batch[\"item_idxs\"]\n d_vectors = batch[\"d_vectors\"]\n speaker_ids = batch[\"speaker_ids\"]\n attn_mask = batch[\"attns\"]\n waveform = batch[\"waveform\"]\n pitch = batch[\"pitch\"]\n energy = batch[\"energy\"]\n language_ids = batch[\"language_ids\"]\n max_text_length = torch.max(text_lengths.float())\n max_spec_length = 
torch.max(mel_lengths.float())\n\n # compute durations from attention masks\n durations = None\n if attn_mask is not None:\n durations = torch.zeros(attn_mask.shape[0], attn_mask.shape[2])\n for idx, am in enumerate(attn_mask):\n # compute raw durations\n c_idxs = am[:, : text_lengths[idx], : mel_lengths[idx]].max(1)[1]\n # c_idxs, counts = torch.unique_consecutive(c_idxs, return_counts=True)\n c_idxs, counts = torch.unique(c_idxs, return_counts=True)\n dur = torch.ones([text_lengths[idx]]).to(counts.dtype)\n dur[c_idxs] = counts\n # smooth the durations and set any 0 duration to 1\n # by cutting off from the largest duration indeces.\n extra_frames = dur.sum() - mel_lengths[idx]\n largest_idxs = torch.argsort(-dur)[:extra_frames]\n dur[largest_idxs] -= 1\n assert (\n dur.sum() == mel_lengths[idx]\n ), f\" [!] total duration {dur.sum()} vs spectrogram length {mel_lengths[idx]}\"\n durations[idx, : text_lengths[idx]] = dur\n\n # set stop targets wrt reduction factor\n stop_targets = stop_targets.view(text_input.shape[0], stop_targets.size(1) // self.config.r, -1)\n stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze(2)\n stop_target_lengths = torch.divide(mel_lengths, self.config.r).ceil_()\n\n return {\n \"text_input\": text_input,\n \"text_lengths\": text_lengths,\n \"speaker_names\": speaker_names,\n \"mel_input\": mel_input,\n \"mel_lengths\": mel_lengths,\n \"linear_input\": linear_input,\n \"stop_targets\": stop_targets,\n \"stop_target_lengths\": stop_target_lengths,\n \"attn_mask\": attn_mask,\n \"durations\": durations,\n \"speaker_ids\": speaker_ids,\n \"d_vectors\": d_vectors,\n \"max_text_length\": float(max_text_length),\n \"max_spec_length\": float(max_spec_length),\n \"item_idx\": item_idx,\n \"waveform\": waveform,\n \"pitch\": pitch,\n \"energy\": energy,\n \"language_ids\": language_ids,\n \"audio_unique_names\": batch[\"audio_unique_names\"],\n }\n\n def get_sampler(self, config: Coqpit, dataset: TTSDataset, num_gpus=1):\n weights = None\n data_items = dataset.samples\n\n if getattr(config, \"use_language_weighted_sampler\", False):\n alpha = getattr(config, \"language_weighted_sampler_alpha\", 1.0)\n print(\" > Using Language weighted sampler with alpha:\", alpha)\n weights = get_language_balancer_weights(data_items) * alpha\n\n if getattr(config, \"use_speaker_weighted_sampler\", False):\n alpha = getattr(config, \"speaker_weighted_sampler_alpha\", 1.0)\n print(\" > Using Speaker weighted sampler with alpha:\", alpha)\n if weights is not None:\n weights += get_speaker_balancer_weights(data_items) * alpha\n else:\n weights = get_speaker_balancer_weights(data_items) * alpha\n\n if getattr(config, \"use_length_weighted_sampler\", False):\n alpha = getattr(config, \"length_weighted_sampler_alpha\", 1.0)\n print(\" > Using Length weighted sampler with alpha:\", alpha)\n if weights is not None:\n weights += get_length_balancer_weights(data_items) * alpha\n else:\n weights = get_length_balancer_weights(data_items) * alpha\n\n if weights is not None:\n sampler = WeightedRandomSampler(weights, len(weights))\n else:\n sampler = None\n\n # sampler for DDP\n if sampler is None:\n sampler = DistributedSampler(dataset) if num_gpus > 1 else None\n else: # If a sampler is already defined use this sampler and DDP sampler together\n sampler = DistributedSamplerWrapper(sampler) if num_gpus > 1 else sampler\n\n return sampler\n\n def get_data_loader(\n self,\n config: Coqpit,\n assets: Dict,\n is_eval: bool,\n samples: Union[List[Dict], List[List]],\n verbose: bool,\n 
num_gpus: int,\n rank: int = None,\n ) -> \"DataLoader\":\n if is_eval and not config.run_eval:\n loader = None\n else:\n # setup multi-speaker attributes\n if self.speaker_manager is not None:\n if hasattr(config, \"model_args\"):\n speaker_id_mapping = (\n self.speaker_manager.name_to_id if config.model_args.use_speaker_embedding else None\n )\n d_vector_mapping = self.speaker_manager.embeddings if config.model_args.use_d_vector_file else None\n config.use_d_vector_file = config.model_args.use_d_vector_file\n else:\n speaker_id_mapping = self.speaker_manager.name_to_id if config.use_speaker_embedding else None\n d_vector_mapping = self.speaker_manager.embeddings if config.use_d_vector_file else None\n else:\n speaker_id_mapping = None\n d_vector_mapping = None\n\n # setup multi-lingual attributes\n if self.language_manager is not None:\n language_id_mapping = self.language_manager.name_to_id if self.args.use_language_embedding else None\n else:\n language_id_mapping = None\n\n # init dataloader\n dataset = TTSDataset(\n outputs_per_step=config.r if \"r\" in config else 1,\n compute_linear_spec=config.model.lower() == \"tacotron\" or config.compute_linear_spec,\n compute_f0=config.get(\"compute_f0\", False),\n f0_cache_path=config.get(\"f0_cache_path\", None),\n compute_energy=config.get(\"compute_energy\", False),\n energy_cache_path=config.get(\"energy_cache_path\", None),\n samples=samples,\n ap=self.ap,\n return_wav=config.return_wav if \"return_wav\" in config else False,\n batch_group_size=0 if is_eval else config.batch_group_size * config.batch_size,\n min_text_len=config.min_text_len,\n max_text_len=config.max_text_len,\n min_audio_len=config.min_audio_len,\n max_audio_len=config.max_audio_len,\n phoneme_cache_path=config.phoneme_cache_path,\n precompute_num_workers=config.precompute_num_workers,\n use_noise_augment=False if is_eval else config.use_noise_augment,\n verbose=verbose,\n speaker_id_mapping=speaker_id_mapping,\n d_vector_mapping=d_vector_mapping if config.use_d_vector_file else None,\n tokenizer=self.tokenizer,\n start_by_longest=config.start_by_longest,\n language_id_mapping=language_id_mapping,\n )\n\n # wait all the DDP process to be ready\n if num_gpus > 1:\n dist.barrier()\n\n # sort input sequences from short to long\n dataset.preprocess_samples()\n\n # get samplers\n sampler = self.get_sampler(config, dataset, num_gpus)\n\n loader = DataLoader(\n dataset,\n batch_size=config.eval_batch_size if is_eval else config.batch_size,\n shuffle=config.shuffle if sampler is None else False, # if there is no other sampler\n collate_fn=dataset.collate_fn,\n drop_last=config.drop_last, # setting this False might cause issues in AMP training.\n sampler=sampler,\n num_workers=config.num_eval_loader_workers if is_eval else config.num_loader_workers,\n pin_memory=False,\n )\n return loader\n\n def _get_test_aux_input(\n self,\n ) -> Dict:\n d_vector = None\n if self.config.use_d_vector_file:\n d_vector = [self.speaker_manager.embeddings[name][\"embedding\"] for name in self.speaker_manager.embeddings]\n d_vector = (random.sample(sorted(d_vector), 1),)\n\n aux_inputs = {\n \"speaker_id\": None\n if not self.config.use_speaker_embedding\n else random.sample(sorted(self.speaker_manager.name_to_id.values()), 1),\n \"d_vector\": d_vector,\n \"style_wav\": None, # TODO: handle GST style input\n }\n return aux_inputs\n\n def test_run(self, assets: Dict) -> Tuple[Dict, Dict]:\n \"\"\"Generic test run for `tts` models used by `Trainer`.\n\n You can override this for a different 
behaviour.\n\n Args:\n assets (dict): A dict of training assets. For `tts` models, it must include `{'audio_processor': ap}`.\n\n Returns:\n Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.\n \"\"\"\n print(\" | > Synthesizing test sentences.\")\n test_audios = {}\n test_figures = {}\n test_sentences = self.config.test_sentences\n aux_inputs = self._get_test_aux_input()\n for idx, sen in enumerate(test_sentences):\n if isinstance(sen, list):\n aux_inputs = self.get_aux_input_from_test_sentences(sen)\n sen = aux_inputs[\"text\"]\n outputs_dict = synthesis(\n self,\n sen,\n self.config,\n \"cuda\" in str(next(self.parameters()).device),\n speaker_id=aux_inputs[\"speaker_id\"],\n d_vector=aux_inputs[\"d_vector\"],\n style_wav=aux_inputs[\"style_wav\"],\n use_griffin_lim=True,\n do_trim_silence=False,\n )\n test_audios[\"{}-audio\".format(idx)] = outputs_dict[\"wav\"]\n test_figures[\"{}-prediction\".format(idx)] = plot_spectrogram(\n outputs_dict[\"outputs\"][\"model_outputs\"], self.ap, output_fig=False\n )\n test_figures[\"{}-alignment\".format(idx)] = plot_alignment(\n outputs_dict[\"outputs\"][\"alignments\"], output_fig=False\n )\n return test_figures, test_audios\n\n def on_init_start(self, trainer):\n \"\"\"Save the speaker.pth and language_ids.json at the beginning of the training. Also update both paths.\"\"\"\n if self.speaker_manager is not None:\n output_path = os.path.join(trainer.output_path, \"speakers.pth\")\n self.speaker_manager.save_ids_to_file(output_path)\n trainer.config.speakers_file = output_path\n # some models don't have `model_args` set\n if hasattr(trainer.config, \"model_args\"):\n trainer.config.model_args.speakers_file = output_path\n trainer.config.save_json(os.path.join(trainer.output_path, \"config.json\"))\n print(f\" > `speakers.pth` is saved to {output_path}.\")\n print(\" > `speakers_file` is updated in the config.json.\")\n\n if self.language_manager is not None:\n output_path = os.path.join(trainer.output_path, \"language_ids.json\")\n self.language_manager.save_ids_to_file(output_path)\n trainer.config.language_ids_file = output_path\n if hasattr(trainer.config, \"model_args\"):\n trainer.config.model_args.language_ids_file = output_path\n trainer.config.save_json(os.path.join(trainer.output_path, \"config.json\"))\n print(f\" > `language_ids.json` is saved to {output_path}.\")\n print(\" > `language_ids_file` is updated in the config.json.\")" }, { "identifier": "SpeakerManager", "path": "TTS/tts/utils/speakers.py", "snippet": "class SpeakerManager(EmbeddingManager):\n \"\"\"Manage the speakers for multi-speaker 🐸TTS models. Load a datafile and parse the information\n in a way that can be queried by speaker or clip.\n\n There are 3 different scenarios considered:\n\n 1. Models using speaker embedding layers. The datafile only maps speaker names to ids used by the embedding layer.\n 2. Models using d-vectors. The datafile includes a dictionary in the following format.\n\n ::\n\n {\n 'clip_name.wav':{\n 'name': 'speakerA',\n 'embedding'[<d_vector_values>]\n },\n ...\n }\n\n\n 3. Computing the d-vectors by the speaker encoder. It loads the speaker encoder model and\n computes the d-vectors for a given clip or speaker.\n\n Args:\n d_vectors_file_path (str, optional): Path to the metafile including x vectors. Defaults to \"\".\n speaker_id_file_path (str, optional): Path to the metafile that maps speaker names to ids used by\n TTS models. 
Defaults to \"\".\n encoder_model_path (str, optional): Path to the speaker encoder model file. Defaults to \"\".\n encoder_config_path (str, optional): Path to the spealer encoder config file. Defaults to \"\".\n\n Examples:\n >>> # load audio processor and speaker encoder\n >>> ap = AudioProcessor(**config.audio)\n >>> manager = SpeakerManager(encoder_model_path=encoder_model_path, encoder_config_path=encoder_config_path)\n >>> # load a sample audio and compute embedding\n >>> waveform = ap.load_wav(sample_wav_path)\n >>> mel = ap.melspectrogram(waveform)\n >>> d_vector = manager.compute_embeddings(mel.T)\n \"\"\"\n\n def __init__(\n self,\n data_items: List[List[Any]] = None,\n d_vectors_file_path: str = \"\",\n speaker_id_file_path: str = \"\",\n encoder_model_path: str = \"\",\n encoder_config_path: str = \"\",\n use_cuda: bool = False,\n ):\n super().__init__(\n embedding_file_path=d_vectors_file_path,\n id_file_path=speaker_id_file_path,\n encoder_model_path=encoder_model_path,\n encoder_config_path=encoder_config_path,\n use_cuda=use_cuda,\n )\n\n if data_items:\n self.set_ids_from_data(data_items, parse_key=\"speaker_name\")\n\n @property\n def num_speakers(self):\n return len(self.name_to_id)\n\n @property\n def speaker_names(self):\n return list(self.name_to_id.keys())\n\n def get_speakers(self) -> List:\n return self.name_to_id\n\n @staticmethod\n def init_from_config(config: \"Coqpit\", samples: Union[List[List], List[Dict]] = None) -> \"SpeakerManager\":\n \"\"\"Initialize a speaker manager from config\n\n Args:\n config (Coqpit): Config object.\n samples (Union[List[List], List[Dict]], optional): List of data samples to parse out the speaker names.\n Defaults to None.\n\n Returns:\n SpeakerEncoder: Speaker encoder object.\n \"\"\"\n speaker_manager = None\n if get_from_config_or_model_args_with_default(config, \"use_speaker_embedding\", False):\n if samples:\n speaker_manager = SpeakerManager(data_items=samples)\n if get_from_config_or_model_args_with_default(config, \"speaker_file\", None):\n speaker_manager = SpeakerManager(\n speaker_id_file_path=get_from_config_or_model_args_with_default(config, \"speaker_file\", None)\n )\n if get_from_config_or_model_args_with_default(config, \"speakers_file\", None):\n speaker_manager = SpeakerManager(\n speaker_id_file_path=get_from_config_or_model_args_with_default(config, \"speakers_file\", None)\n )\n\n if get_from_config_or_model_args_with_default(config, \"use_d_vector_file\", False):\n speaker_manager = SpeakerManager()\n if get_from_config_or_model_args_with_default(config, \"d_vector_file\", None):\n speaker_manager = SpeakerManager(\n d_vectors_file_path=get_from_config_or_model_args_with_default(config, \"d_vector_file\", None)\n )\n return speaker_manager" }, { "identifier": "TTSTokenizer", "path": "TTS/tts/utils/text/tokenizer.py", "snippet": "class TTSTokenizer:\n \"\"\"🐸TTS tokenizer to convert input characters to token IDs and back.\n\n Token IDs for OOV chars are discarded but those are stored in `self.not_found_characters` for later.\n\n Args:\n use_phonemes (bool):\n Whether to use phonemes instead of characters. Defaults to False.\n\n characters (Characters):\n A Characters object to use for character-to-ID and ID-to-character mappings.\n\n text_cleaner (callable):\n A function to pre-process the text before tokenization and phonemization. Defaults to None.\n\n phonemizer (Phonemizer):\n A phonemizer object or a dict that maps language codes to phonemizer objects. 
Defaults to None.\n\n Example:\n\n >>> from TTS.tts.utils.text.tokenizer import TTSTokenizer\n >>> tokenizer = TTSTokenizer(use_phonemes=False, characters=Graphemes())\n >>> text = \"Hello world!\"\n >>> ids = tokenizer.text_to_ids(text)\n >>> text_hat = tokenizer.ids_to_text(ids)\n >>> assert text == text_hat\n \"\"\"\n\n def __init__(\n self,\n use_phonemes=False,\n text_cleaner: Callable = None,\n characters: \"BaseCharacters\" = None,\n phonemizer: Union[\"Phonemizer\", Dict] = None,\n add_blank: bool = False,\n use_eos_bos=False,\n ):\n self.text_cleaner = text_cleaner\n self.use_phonemes = use_phonemes\n self.add_blank = add_blank\n self.use_eos_bos = use_eos_bos\n self.characters = characters\n self.not_found_characters = []\n self.phonemizer = phonemizer\n\n @property\n def characters(self):\n return self._characters\n\n @characters.setter\n def characters(self, new_characters):\n self._characters = new_characters\n self.pad_id = self.characters.char_to_id(self.characters.pad) if self.characters.pad else None\n self.blank_id = self.characters.char_to_id(self.characters.blank) if self.characters.blank else None\n\n def encode(self, text: str) -> List[int]:\n \"\"\"Encodes a string of text as a sequence of IDs.\"\"\"\n token_ids = []\n for char in text:\n try:\n idx = self.characters.char_to_id(char)\n token_ids.append(idx)\n except KeyError:\n # discard but store not found characters\n if char not in self.not_found_characters:\n self.not_found_characters.append(char)\n print(text)\n print(f\" [!] Character {repr(char)} not found in the vocabulary. Discarding it.\")\n return token_ids\n\n def decode(self, token_ids: List[int]) -> str:\n \"\"\"Decodes a sequence of IDs to a string of text.\"\"\"\n text = \"\"\n for token_id in token_ids:\n text += self.characters.id_to_char(token_id)\n return text\n\n def text_to_ids(self, text: str, language: str = None) -> List[int]: # pylint: disable=unused-argument\n \"\"\"Converts a string of text to a sequence of token IDs.\n\n Args:\n text(str):\n The text to convert to token IDs.\n\n language(str):\n The language code of the text. Defaults to None.\n\n TODO:\n - Add support for language-specific processing.\n\n 1. Text normalizatin\n 2. Phonemization (if use_phonemes is True)\n 3. Add blank char between characters\n 4. Add BOS and EOS characters\n 5. 
Text to token IDs\n \"\"\"\n # TODO: text cleaner should pick the right routine based on the language\n if self.text_cleaner is not None:\n text = self.text_cleaner(text)\n if self.use_phonemes:\n text = self.phonemizer.phonemize(text, separator=\"\", language=language)\n text = self.encode(text)\n if self.add_blank:\n text = self.intersperse_blank_char(text, True)\n if self.use_eos_bos:\n text = self.pad_with_bos_eos(text)\n return text\n\n def ids_to_text(self, id_sequence: List[int]) -> str:\n \"\"\"Converts a sequence of token IDs to a string of text.\"\"\"\n return self.decode(id_sequence)\n\n def pad_with_bos_eos(self, char_sequence: List[str]):\n \"\"\"Pads a sequence with the special BOS and EOS characters.\"\"\"\n return [self.characters.bos_id] + list(char_sequence) + [self.characters.eos_id]\n\n def intersperse_blank_char(self, char_sequence: List[str], use_blank_char: bool = False):\n \"\"\"Intersperses the blank character between characters in a sequence.\n\n Use the ```blank``` character if defined else use the ```pad``` character.\n \"\"\"\n char_to_use = self.characters.blank_id if use_blank_char else self.characters.pad\n result = [char_to_use] * (len(char_sequence) * 2 + 1)\n result[1::2] = char_sequence\n return result\n\n def print_logs(self, level: int = 0):\n indent = \"\\t\" * level\n print(f\"{indent}| > add_blank: {self.add_blank}\")\n print(f\"{indent}| > use_eos_bos: {self.use_eos_bos}\")\n print(f\"{indent}| > use_phonemes: {self.use_phonemes}\")\n if self.use_phonemes:\n print(f\"{indent}| > phonemizer:\")\n self.phonemizer.print_logs(level + 1)\n if len(self.not_found_characters) > 0:\n print(f\"{indent}| > {len(self.not_found_characters)} not found characters:\")\n for char in self.not_found_characters:\n print(f\"{indent}| > {char}\")\n\n @staticmethod\n def init_from_config(config: \"Coqpit\", characters: \"BaseCharacters\" = None):\n \"\"\"Init Tokenizer object from config\n\n Args:\n config (Coqpit): Coqpit model config.\n characters (BaseCharacters): Defines the model character set. If not set, use the default options based on\n the config values. 
Defaults to None.\n \"\"\"\n # init cleaners\n text_cleaner = None\n if isinstance(config.text_cleaner, (str, list)):\n text_cleaner = getattr(cleaners, config.text_cleaner)\n\n # init characters\n if characters is None:\n # set characters based on defined characters class\n if config.characters and config.characters.characters_class:\n CharactersClass = import_class(config.characters.characters_class)\n characters, new_config = CharactersClass.init_from_config(config)\n # set characters based on config\n else:\n if config.use_phonemes:\n # init phoneme set\n characters, new_config = IPAPhonemes().init_from_config(config)\n else:\n # init character set\n characters, new_config = Graphemes().init_from_config(config)\n\n else:\n characters, new_config = characters.init_from_config(config)\n\n # set characters class\n new_config.characters.characters_class = get_import_path(characters)\n\n # init phonemizer\n phonemizer = None\n if config.use_phonemes:\n if \"phonemizer\" in config and config.phonemizer == \"multi_phonemizer\":\n lang_to_phonemizer_name = {}\n for dataset in config.datasets:\n if dataset.language != \"\":\n lang_to_phonemizer_name[dataset.language] = dataset.phonemizer\n else:\n raise ValueError(\"Multi phonemizer requires language to be set for each dataset.\")\n phonemizer = MultiPhonemizer(lang_to_phonemizer_name)\n else:\n phonemizer_kwargs = {\"language\": config.phoneme_language}\n if \"phonemizer\" in config and config.phonemizer:\n phonemizer = get_phonemizer_by_name(config.phonemizer, **phonemizer_kwargs)\n else:\n try:\n phonemizer = get_phonemizer_by_name(\n DEF_LANG_TO_PHONEMIZER[config.phoneme_language], **phonemizer_kwargs\n )\n new_config.phonemizer = phonemizer.name()\n except KeyError as e:\n raise ValueError(\n f\"\"\"No phonemizer found for language {config.phoneme_language}.\n You may need to install a third party library for this language.\"\"\"\n ) from e\n\n return (\n TTSTokenizer(\n config.use_phonemes, text_cleaner, characters, phonemizer, config.add_blank, config.enable_eos_bos_chars\n ),\n new_config,\n )" }, { "identifier": "plot_alignment", "path": "TTS/tts/utils/visual.py", "snippet": "def plot_alignment(alignment, info=None, fig_size=(16, 10), title=None, output_fig=False, plot_log=False):\n if isinstance(alignment, torch.Tensor):\n alignment_ = alignment.detach().cpu().numpy().squeeze()\n else:\n alignment_ = alignment\n alignment_ = alignment_.astype(np.float32) if alignment_.dtype == np.float16 else alignment_\n fig, ax = plt.subplots(figsize=fig_size)\n im = ax.imshow(\n alignment_.T, aspect=\"auto\", origin=\"lower\", interpolation=\"none\", norm=LogNorm() if plot_log else None\n )\n fig.colorbar(im, ax=ax)\n xlabel = \"Decoder timestep\"\n if info is not None:\n xlabel += \"\\n\\n\" + info\n plt.xlabel(xlabel)\n plt.ylabel(\"Encoder timestep\")\n # plt.yticks(range(len(text)), list(text))\n plt.tight_layout()\n if title is not None:\n plt.title(title)\n if not output_fig:\n plt.close()\n return fig" }, { "identifier": "plot_spectrogram", "path": "TTS/tts/utils/visual.py", "snippet": "def plot_spectrogram(spectrogram, ap=None, fig_size=(16, 10), output_fig=False):\n if isinstance(spectrogram, torch.Tensor):\n spectrogram_ = spectrogram.detach().cpu().numpy().squeeze().T\n else:\n spectrogram_ = spectrogram.T\n spectrogram_ = spectrogram_.astype(np.float32) if spectrogram_.dtype == np.float16 else spectrogram_\n if ap is not None:\n spectrogram_ = ap.denormalize(spectrogram_) # pylint: disable=protected-access\n fig = 
plt.figure(figsize=fig_size)\n plt.imshow(spectrogram_, aspect=\"auto\", origin=\"lower\")\n plt.colorbar()\n plt.tight_layout()\n if not output_fig:\n plt.close()\n return fig" }, { "identifier": "format_aux_input", "path": "TTS/utils/generic_utils.py", "snippet": "def format_aux_input(def_args: Dict, kwargs: Dict) -> Dict:\n \"\"\"Format kwargs to hande auxilary inputs to models.\n\n Args:\n def_args (Dict): A dictionary of argument names and their default values if not defined in `kwargs`.\n kwargs (Dict): A `dict` or `kwargs` that includes auxilary inputs to the model.\n\n Returns:\n Dict: arguments with formatted auxilary inputs.\n \"\"\"\n kwargs = kwargs.copy()\n for name in def_args:\n if name not in kwargs or kwargs[name] is None:\n kwargs[name] = def_args[name]\n return kwargs" }, { "identifier": "load_fsspec", "path": "TTS/utils/io.py", "snippet": "def load_fsspec(\n path: str,\n map_location: Union[str, Callable, torch.device, Dict[Union[str, torch.device], Union[str, torch.device]]] = None,\n cache: bool = True,\n **kwargs,\n) -> Any:\n \"\"\"Like torch.load but can load from other locations (e.g. s3:// , gs://).\n\n Args:\n path: Any path or url supported by fsspec.\n map_location: torch.device or str.\n cache: If True, cache a remote file locally for subsequent calls. It is cached under `get_user_data_dir()/tts_cache`. Defaults to True.\n **kwargs: Keyword arguments forwarded to torch.load.\n\n Returns:\n Object stored in path.\n \"\"\"\n is_local = os.path.isdir(path) or os.path.isfile(path)\n if cache and not is_local:\n with fsspec.open(\n f\"filecache::{path}\",\n filecache={\"cache_storage\": str(get_user_data_dir(\"tts_cache\"))},\n mode=\"rb\",\n ) as f:\n return torch.load(f, map_location=map_location, **kwargs)\n else:\n with fsspec.open(path, \"rb\") as f:\n return torch.load(f, map_location=map_location, **kwargs)" } ]
import os
import torch
from typing import Dict, List, Union
from coqpit import Coqpit
from torch import nn
from trainer.logging.tensorboard_logger import TensorboardLogger
from TTS.tts.layers.overflow.common_layers import Encoder, OverflowUtils
from TTS.tts.layers.overflow.neural_hmm import NeuralHMM
from TTS.tts.layers.overflow.plotting_utils import (
    get_spec_from_most_probable_state,
    plot_transition_probabilities_to_numpy,
)
from TTS.tts.models.base_tts import BaseTTS
from TTS.tts.utils.speakers import SpeakerManager
from TTS.tts.utils.text.tokenizer import TTSTokenizer
from TTS.tts.utils.visual import plot_alignment, plot_spectrogram
from TTS.utils.generic_utils import format_aux_input
from TTS.utils.io import load_fsspec
from TTS.utils.audio import AudioProcessor
18911
@torch.no_grad() def inference( self, text: torch.Tensor, aux_input={"x_lengths": None, "sampling_temp": None, "max_sampling_time": None, "duration_threshold": None}, ): # pylint: disable=dangerous-default-value """Sampling from the model Args: text (torch.Tensor): :math:`[B, T_in]` aux_inputs (_type_, optional): _description_. Defaults to None. Returns: outputs: Dictionary containing the following - mel (torch.Tensor): :math:`[B, T_out, C]` - hmm_outputs_len (torch.Tensor): :math:`[B]` - state_travelled (List[List[int]]): List of lists containing the state travelled for each sample in the batch. - input_parameters (list[torch.FloatTensor]): Input parameters to the neural HMM. - output_parameters (list[torch.FloatTensor]): Output parameters to the neural HMM. """ default_input_dict = { "x_lengths": torch.sum(text != 0, dim=1), } aux_input = self._format_aux_input(aux_input, default_input_dict) encoder_outputs, encoder_output_len = self.encoder.inference(text, aux_input["x_lengths"]) outputs = self.neural_hmm.inference( encoder_outputs, encoder_output_len, sampling_temp=aux_input["sampling_temp"], max_sampling_time=aux_input["max_sampling_time"], duration_threshold=aux_input["duration_threshold"], ) mels, mel_outputs_len = outputs["hmm_outputs"], outputs["hmm_outputs_len"] mels = self.inverse_normalize(mels) outputs.update({"model_outputs": mels, "model_outputs_len": mel_outputs_len}) outputs["alignments"] = OverflowUtils.double_pad(outputs["alignments"]) return outputs @staticmethod def get_criterion(): return NLLLoss() @staticmethod def init_from_config(config: "NeuralhmmTTSConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): """Initiate model from config Args: config (VitsConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. verbose (bool): If True, print init messages. Defaults to True. """ ap = AudioProcessor.init_from_config(config, verbose) tokenizer, new_config = TTSTokenizer.init_from_config(config) speaker_manager = SpeakerManager.init_from_config(config, samples) return NeuralhmmTTS(new_config, ap, tokenizer, speaker_manager) def load_checkpoint( self, config: Coqpit, checkpoint_path: str, eval: bool = False, strict: bool = True, cache=False ): # pylint: disable=unused-argument, redefined-builtin state = load_fsspec(checkpoint_path, map_location=torch.device("cpu")) self.load_state_dict(state["model"]) if eval: self.eval() assert not self.training def on_init_start(self, trainer): """If the current dataset does not have normalisation statistics and initialisation transition_probability it computes them otherwise loads.""" if not os.path.isfile(trainer.config.mel_statistics_parameter_path) or trainer.config.force_generate_statistics: dataloader = trainer.get_train_dataloader( training_assets=None, samples=trainer.train_samples, verbose=False ) print( f" | > Data parameters not found for: {trainer.config.mel_statistics_parameter_path}. Computing mel normalization parameters..." 
) data_mean, data_std, init_transition_prob = OverflowUtils.get_data_parameters_for_flat_start( dataloader, trainer.config.out_channels, trainer.config.state_per_phone ) print( f" | > Saving data parameters to: {trainer.config.mel_statistics_parameter_path}: value: {data_mean, data_std, init_transition_prob}" ) statistics = { "mean": data_mean.item(), "std": data_std.item(), "init_transition_prob": init_transition_prob.item(), } torch.save(statistics, trainer.config.mel_statistics_parameter_path) else: print( f" | > Data parameters found for: {trainer.config.mel_statistics_parameter_path}. Loading mel normalization parameters..." ) statistics = torch.load(trainer.config.mel_statistics_parameter_path) data_mean, data_std, init_transition_prob = ( statistics["mean"], statistics["std"], statistics["init_transition_prob"], ) print(f" | > Data parameters loaded with value: {data_mean, data_std, init_transition_prob}") trainer.config.flat_start_params["transition_p"] = ( init_transition_prob.item() if torch.is_tensor(init_transition_prob) else init_transition_prob ) OverflowUtils.update_flat_start_transition(trainer.model, init_transition_prob) trainer.model.update_mean_std(statistics) @torch.inference_mode() def _create_logs(self, batch, outputs, ap): # pylint: disable=no-self-use, unused-argument alignments, transition_vectors = outputs["alignments"], outputs["transition_vectors"] means = torch.stack(outputs["means"], dim=1) figures = { "alignment": plot_alignment(alignments[0].exp(), title="Forward alignment", fig_size=(20, 20)), "log_alignment": plot_alignment( alignments[0].exp(), title="Forward log alignment", plot_log=True, fig_size=(20, 20) ), "transition_vectors": plot_alignment(transition_vectors[0], title="Transition vectors", fig_size=(20, 20)),
class NeuralhmmTTS(BaseTTS): """Neural HMM TTS model. Paper:: https://arxiv.org/abs/2108.13320 Paper abstract:: Neural sequence-to-sequence TTS has achieved significantly better output quality than statistical speech synthesis using HMMs.However, neural TTS is generally not probabilistic and uses non-monotonic attention. Attention failures increase training time and can make synthesis babble incoherently. This paper describes how the old and new paradigms can be combined to obtain the advantages of both worlds, by replacing attention in neural TTS with an autoregressive left-right no-skip hidden Markov model defined by a neural network. Based on this proposal, we modify Tacotron 2 to obtain an HMM-based neural TTS model with monotonic alignment, trained to maximise the full sequence likelihood without approximation. We also describe how to combine ideas from classical and contemporary TTS for best results. The resulting example system is smaller and simpler than Tacotron 2, and learns to speak with fewer iterations and less data, whilst achieving comparable naturalness prior to the post-net. Our approach also allows easy control over speaking rate. Audio examples and code are available at https://shivammehta25.github.io/Neural-HMM/ . Note: - This is a parameter efficient version of OverFlow (15.3M vs 28.6M). Since it has half the number of parameters as OverFlow the synthesis output quality is suboptimal (but comparable to Tacotron2 without Postnet), but it learns to speak with even lesser amount of data and is still significantly faster than other attention-based methods. - Neural HMMs uses flat start initialization i.e it computes the means and std and transition probabilities of the dataset and uses them to initialize the model. This benefits the model and helps with faster learning If you change the dataset or want to regenerate the parameters change the `force_generate_statistics` and `mel_statistics_parameter_path` accordingly. - To enable multi-GPU training, set the `use_grad_checkpointing=False` in config. This will significantly increase the memory usage. This is because to compute the actual data likelihood (not an approximation using MAS/Viterbi) we must use all the states at the previous time step during the forward pass to decide the probability distribution at the current step i.e the difference between the forward algorithm and viterbi approximation. Check :class:`TTS.tts.configs.neuralhmm_tts_config.NeuralhmmTTSConfig` for class arguments. 
""" def __init__( self, config: "NeuralhmmTTSConfig", ap: "AudioProcessor" = None, tokenizer: "TTSTokenizer" = None, speaker_manager: SpeakerManager = None, ): super().__init__(config, ap, tokenizer, speaker_manager) # pass all config fields to `self` # for fewer code change self.config = config for key in config: setattr(self, key, config[key]) self.encoder = Encoder(config.num_chars, config.state_per_phone, config.encoder_in_out_features) self.neural_hmm = NeuralHMM( frame_channels=self.out_channels, ar_order=self.ar_order, deterministic_transition=self.deterministic_transition, encoder_dim=self.encoder_in_out_features, prenet_type=self.prenet_type, prenet_dim=self.prenet_dim, prenet_n_layers=self.prenet_n_layers, prenet_dropout=self.prenet_dropout, prenet_dropout_at_inference=self.prenet_dropout_at_inference, memory_rnn_dim=self.memory_rnn_dim, outputnet_size=self.outputnet_size, flat_start_params=self.flat_start_params, std_floor=self.std_floor, use_grad_checkpointing=self.use_grad_checkpointing, ) self.register_buffer("mean", torch.tensor(0)) self.register_buffer("std", torch.tensor(1)) def update_mean_std(self, statistics_dict: Dict): self.mean.data = torch.tensor(statistics_dict["mean"]) self.std.data = torch.tensor(statistics_dict["std"]) def preprocess_batch(self, text, text_len, mels, mel_len): if self.mean.item() == 0 or self.std.item() == 1: statistics_dict = torch.load(self.mel_statistics_parameter_path) self.update_mean_std(statistics_dict) mels = self.normalize(mels) return text, text_len, mels, mel_len def normalize(self, x): return x.sub(self.mean).div(self.std) def inverse_normalize(self, x): return x.mul(self.std).add(self.mean) def forward(self, text, text_len, mels, mel_len): """ Forward pass for training and computing the log likelihood of a given batch. Shapes: Shapes: text: :math:`[B, T_in]` text_len: :math:`[B]` mels: :math:`[B, T_out, C]` mel_len: :math:`[B]` """ text, text_len, mels, mel_len = self.preprocess_batch(text, text_len, mels, mel_len) encoder_outputs, encoder_output_len = self.encoder(text, text_len) log_probs, fwd_alignments, transition_vectors, means = self.neural_hmm( encoder_outputs, encoder_output_len, mels.transpose(1, 2), mel_len ) outputs = { "log_probs": log_probs, "alignments": fwd_alignments, "transition_vectors": transition_vectors, "means": means, } return outputs @staticmethod def _training_stats(batch): stats = {} stats["avg_text_length"] = batch["text_lengths"].float().mean() stats["avg_spec_length"] = batch["mel_lengths"].float().mean() stats["avg_text_batch_occupancy"] = (batch["text_lengths"].float() / batch["text_lengths"].float().max()).mean() stats["avg_spec_batch_occupancy"] = (batch["mel_lengths"].float() / batch["mel_lengths"].float().max()).mean() return stats def train_step(self, batch: dict, criterion: nn.Module): text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] outputs = self.forward( text=text_input, text_len=text_lengths, mels=mel_input, mel_len=mel_lengths, ) loss_dict = criterion(outputs["log_probs"] / (mel_lengths.sum() + text_lengths.sum())) # for printing useful statistics on terminal loss_dict.update(self._training_stats(batch)) return outputs, loss_dict def eval_step(self, batch: Dict, criterion: nn.Module): return self.train_step(batch, criterion) def _format_aux_input(self, aux_input: Dict, default_input_dict): """Set missing fields to their default value. Args: aux_inputs (Dict): Dictionary containing the auxiliary inputs. 
""" default_input_dict = default_input_dict.copy() default_input_dict.update( { "sampling_temp": self.sampling_temp, "max_sampling_time": self.max_sampling_time, "duration_threshold": self.duration_threshold, } ) if aux_input: return format_aux_input(default_input_dict, aux_input) return default_input_dict @torch.no_grad() def inference( self, text: torch.Tensor, aux_input={"x_lengths": None, "sampling_temp": None, "max_sampling_time": None, "duration_threshold": None}, ): # pylint: disable=dangerous-default-value """Sampling from the model Args: text (torch.Tensor): :math:`[B, T_in]` aux_inputs (_type_, optional): _description_. Defaults to None. Returns: outputs: Dictionary containing the following - mel (torch.Tensor): :math:`[B, T_out, C]` - hmm_outputs_len (torch.Tensor): :math:`[B]` - state_travelled (List[List[int]]): List of lists containing the state travelled for each sample in the batch. - input_parameters (list[torch.FloatTensor]): Input parameters to the neural HMM. - output_parameters (list[torch.FloatTensor]): Output parameters to the neural HMM. """ default_input_dict = { "x_lengths": torch.sum(text != 0, dim=1), } aux_input = self._format_aux_input(aux_input, default_input_dict) encoder_outputs, encoder_output_len = self.encoder.inference(text, aux_input["x_lengths"]) outputs = self.neural_hmm.inference( encoder_outputs, encoder_output_len, sampling_temp=aux_input["sampling_temp"], max_sampling_time=aux_input["max_sampling_time"], duration_threshold=aux_input["duration_threshold"], ) mels, mel_outputs_len = outputs["hmm_outputs"], outputs["hmm_outputs_len"] mels = self.inverse_normalize(mels) outputs.update({"model_outputs": mels, "model_outputs_len": mel_outputs_len}) outputs["alignments"] = OverflowUtils.double_pad(outputs["alignments"]) return outputs @staticmethod def get_criterion(): return NLLLoss() @staticmethod def init_from_config(config: "NeuralhmmTTSConfig", samples: Union[List[List], List[Dict]] = None, verbose=True): """Initiate model from config Args: config (VitsConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. verbose (bool): If True, print init messages. Defaults to True. """ ap = AudioProcessor.init_from_config(config, verbose) tokenizer, new_config = TTSTokenizer.init_from_config(config) speaker_manager = SpeakerManager.init_from_config(config, samples) return NeuralhmmTTS(new_config, ap, tokenizer, speaker_manager) def load_checkpoint( self, config: Coqpit, checkpoint_path: str, eval: bool = False, strict: bool = True, cache=False ): # pylint: disable=unused-argument, redefined-builtin state = load_fsspec(checkpoint_path, map_location=torch.device("cpu")) self.load_state_dict(state["model"]) if eval: self.eval() assert not self.training def on_init_start(self, trainer): """If the current dataset does not have normalisation statistics and initialisation transition_probability it computes them otherwise loads.""" if not os.path.isfile(trainer.config.mel_statistics_parameter_path) or trainer.config.force_generate_statistics: dataloader = trainer.get_train_dataloader( training_assets=None, samples=trainer.train_samples, verbose=False ) print( f" | > Data parameters not found for: {trainer.config.mel_statistics_parameter_path}. Computing mel normalization parameters..." 
) data_mean, data_std, init_transition_prob = OverflowUtils.get_data_parameters_for_flat_start( dataloader, trainer.config.out_channels, trainer.config.state_per_phone ) print( f" | > Saving data parameters to: {trainer.config.mel_statistics_parameter_path}: value: {data_mean, data_std, init_transition_prob}" ) statistics = { "mean": data_mean.item(), "std": data_std.item(), "init_transition_prob": init_transition_prob.item(), } torch.save(statistics, trainer.config.mel_statistics_parameter_path) else: print( f" | > Data parameters found for: {trainer.config.mel_statistics_parameter_path}. Loading mel normalization parameters..." ) statistics = torch.load(trainer.config.mel_statistics_parameter_path) data_mean, data_std, init_transition_prob = ( statistics["mean"], statistics["std"], statistics["init_transition_prob"], ) print(f" | > Data parameters loaded with value: {data_mean, data_std, init_transition_prob}") trainer.config.flat_start_params["transition_p"] = ( init_transition_prob.item() if torch.is_tensor(init_transition_prob) else init_transition_prob ) OverflowUtils.update_flat_start_transition(trainer.model, init_transition_prob) trainer.model.update_mean_std(statistics) @torch.inference_mode() def _create_logs(self, batch, outputs, ap): # pylint: disable=no-self-use, unused-argument alignments, transition_vectors = outputs["alignments"], outputs["transition_vectors"] means = torch.stack(outputs["means"], dim=1) figures = { "alignment": plot_alignment(alignments[0].exp(), title="Forward alignment", fig_size=(20, 20)), "log_alignment": plot_alignment( alignments[0].exp(), title="Forward log alignment", plot_log=True, fig_size=(20, 20) ), "transition_vectors": plot_alignment(transition_vectors[0], title="Transition vectors", fig_size=(20, 20)),
"mel_from_most_probable_state": plot_spectrogram(
9
2023-11-29 08:15:06+00:00
24k
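A note on the TTS record that ends here: its on_init_start hook computes mel normalization statistics and a flat-start transition probability on the first run, then reloads them from a cached file on later runs. The sketch below restates just that compute-or-load pattern in isolation; the statistics computation is a simplified stand-in for OverflowUtils.get_data_parameters_for_flat_start, and the dataloader is assumed to yield raw mel tensors.

import os
import torch

def compute_data_parameters(dataloader):
    # Simplified stand-in: one pass over mel batches for a global mean/std.
    total, total_sq, count = 0.0, 0.0, 0
    for batch in dataloader:              # assumed: batch is a float tensor of mel frames
        total += batch.sum()
        total_sq += (batch ** 2).sum()
        count += batch.numel()
    mean = total / count
    std = (total_sq / count - mean ** 2).sqrt()
    init_transition_prob = torch.tensor(0.25)  # placeholder flat-start value
    return mean, std, init_transition_prob

def load_or_compute_statistics(stats_path, dataloader, force=False):
    # Compute-or-load caching, mirroring the on_init_start logic shown above.
    if force or not os.path.isfile(stats_path):
        mean, std, init_transition_prob = compute_data_parameters(dataloader)
        statistics = {
            "mean": mean.item(),
            "std": std.item(),
            "init_transition_prob": init_transition_prob.item(),
        }
        torch.save(statistics, stats_path)
    else:
        statistics = torch.load(stats_path)
    return statistics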
repo_name: magic-research/magic-animate
file_path: magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetModel", "path": "magicanimate/models/controlnet.py", "snippet": "class ControlNetModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n ):\n super().__init__()\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = 
zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlock2DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n\n @classmethod\n def from_unet(\n cls,\n unet: UNet2DConditionModel,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n load_weights_from_unet: bool = True,\n ):\n r\"\"\"\n Instantiate Controlnet class from UNet2DConditionModel.\n\n Parameters:\n unet (`UNet2DConditionModel`):\n UNet model which weights are copied to the ControlNet. Note that all configuration options are also\n copied where applicable.\n \"\"\"\n controlnet = cls(\n in_channels=unet.config.in_channels,\n flip_sin_to_cos=unet.config.flip_sin_to_cos,\n freq_shift=unet.config.freq_shift,\n down_block_types=unet.config.down_block_types,\n only_cross_attention=unet.config.only_cross_attention,\n block_out_channels=unet.config.block_out_channels,\n layers_per_block=unet.config.layers_per_block,\n downsample_padding=unet.config.downsample_padding,\n mid_block_scale_factor=unet.config.mid_block_scale_factor,\n act_fn=unet.config.act_fn,\n norm_num_groups=unet.config.norm_num_groups,\n norm_eps=unet.config.norm_eps,\n cross_attention_dim=unet.config.cross_attention_dim,\n attention_head_dim=unet.config.attention_head_dim,\n use_linear_projection=unet.config.use_linear_projection,\n class_embed_type=unet.config.class_embed_type,\n num_class_embeds=unet.config.num_class_embeds,\n upcast_attention=unet.config.upcast_attention,\n resnet_time_scale_shift=unet.config.resnet_time_scale_shift,\n projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,\n controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,\n conditioning_embedding_out_channels=conditioning_embedding_out_channels,\n )\n\n if load_weights_from_unet:\n controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())\n controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())\n controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())\n\n if controlnet.class_embedding:\n controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())\n\n controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())\n controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())\n\n return controlnet\n\n # @property\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n # def attn_processors(self) -> Dict[str, AttentionProcessor]:\n # r\"\"\"\n # Returns:\n # `dict` of attention processors: A dictionary containing all attention processors used in the model with\n # indexed by its 
weight name.\n # \"\"\"\n # # set recursively\n # processors = {}\n\n # def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n # if hasattr(module, \"set_processor\"):\n # processors[f\"{name}.processor\"] = module.processor\n\n # for sub_name, child in module.named_children():\n # fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n # return processors\n\n # for name, module in self.named_children():\n # fn_recursive_add_processors(name, module, processors)\n\n # return processors\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n # def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n # r\"\"\"\n # Parameters:\n # `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):\n # The instantiated processor class or a dictionary of processor classes that will be set as the processor\n # of **all** `Attention` layers.\n # In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:\n\n # \"\"\"\n # count = len(self.attn_processors.keys())\n\n # if isinstance(processor, dict) and len(processor) != count:\n # raise ValueError(\n # f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n # f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n # )\n\n # def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n # if hasattr(module, \"set_processor\"):\n # if not isinstance(processor, dict):\n # module.set_processor(processor)\n # else:\n # module.set_processor(processor.pop(f\"{name}.processor\"))\n\n # for sub_name, child in module.named_children():\n # fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n # for name, module in self.named_children():\n # fn_recursive_attn_processor(name, module, processor)\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n # def set_default_attn_processor(self):\n # \"\"\"\n # Disables custom attention processors and sets the default attention implementation.\n # \"\"\"\n # self.set_attn_processor(AttnProcessor())\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n conditioning_scale: float = 1.0,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n\n sample += controlnet_cond\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n\n # 5. Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n # 6. scaling\n down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]\n mid_block_res_sample *= conditioning_scale\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.controlnet import ControlNetModel from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
19,468
v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) 
_mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, controlnet_condition: list = None, controlnet_conditioning_scale: float = 1.0, context_frames: int = 16, context_stride: int = 1, context_overlap: int = 4, context_batch_size: int = 1, context_schedule: str = "uniform", init_latents: Optional[torch.FloatTensor] = None, num_actual_inference_steps: Optional[int] = None, appearance_encoder = None, reference_control_writer = None, reference_control_reader = None, source_image: str = None, decoder_consistency = None, **kwargs, ): """ New args: - controlnet_condition : condition map (e.g., depth, canny, keypoints) for controlnet - controlnet_conditioning_scale : conditioning scale for controlnet - init_latents : initial latents to begin with (used along with invert()) - num_actual_inference_steps : number of actual inference steps (while total steps is num_inference_steps) """ controlnet = self.controlnet # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) text_embeddings = torch.cat([text_embeddings] * context_batch_size)
reference_control_writer = ReferenceAttentionControl(appearance_encoder, do_classifier_free_guidance=True, mode='write', batch_size=context_batch_size)
2
2023-11-21 08:33:54+00:00
24k
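The record above closes with its level bucket (24k). To make the field layout concrete, the following is a minimal sketch, under the assumption of a generic evaluation harness, of how such a record could be scored for next-line completion: the retrieved context snippet at gold_snippet_index, the import_statement, and the cropped_code form the prompt, and the model's first generated line is compared against next_line. The build_prompt, exact_match, and score_record helpers and the generate_line callable are hypothetical illustrations, not part of this dataset or of any repository it references.

# Hypothetical sketch of consuming one record of this next-line completion dataset.
# Field names mirror the schema above; generate_line() stands in for any LLM call.
from typing import Callable, Dict, List


def build_prompt(record: Dict) -> str:
    # Prepend the gold retrieved snippet and the import block to the cropped code.
    context: List[Dict] = record["context"]
    gold_snippet = context[record["gold_snippet_index"]]["snippet"]
    return "\n\n".join([gold_snippet, record["import_statement"], record["cropped_code"]])


def exact_match(prediction: str, reference: str) -> bool:
    # Whitespace-insensitive comparison of a single generated source line.
    return " ".join(prediction.split()) == " ".join(reference.split())


def score_record(record: Dict, generate_line: Callable[[str], str]) -> bool:
    # True if the model reproduces the held-out next_line.
    first_line = generate_line(build_prompt(record)).splitlines()[0]
    return exact_match(first_line, record["next_line"])


if __name__ == "__main__":
    # Toy record and a stand-in "model" that echoes the reference, just to exercise the flow.
    toy_record = {
        "context": [{"identifier": "helper", "path": "pkg/helper.py", "snippet": "def helper():\n    return 1"}],
        "gold_snippet_index": 0,
        "import_statement": "from pkg.helper import helper",
        "cropped_code": "x = helper()",
        "next_line": "y = x + 1",
    }
    print(score_record(toy_record, lambda prompt: toy_record["next_line"]))

A whitespace-insensitive exact match is used here only because next_line is a single source line; a different metric (edit similarity, token-level F1) could be substituted without changing how the record's fields are combined.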
eth-sri/language-model-arithmetic
src/model_arithmetic/evaluation.py
[ { "identifier": "BaseClass", "path": "src/model_arithmetic/base.py", "snippet": "class BaseClass:\n \"\"\"\n Base class for providing a serialization and deserialization mechanism.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Instantiates the base class with keyword arguments\n \n Args:\n kwargs (dict): Keyword arguments\n \"\"\"\n self.kwargs = kwargs\n self.__dict__.update(kwargs)\n\n def generate_list_settings(self, list_):\n \"\"\"\n Converts provided list to a normalized list that can be stored as a json object to serialize.\n \n Args:\n list_ (List): List to be converted\n Returns\n Transformed normal list\n \"\"\"\n normal_list = []\n for item in list_:\n if isinstance(item, BaseClass):\n normal_list.append(item.generate_settings())\n elif isinstance(item, dict):\n normal_list.append(self.generate_kwarg_setting(item))\n elif isinstance(item, (tuple, list)):\n normal_list.append(self.generate_list_settings(item))\n else:\n normal_list.append(item)\n return normal_list\n\n def generate_kwarg_setting(self, kwargs):\n \"\"\"\n Converts provided keyword arguments to normal kwargs in terms of serialization.\n\n Args:\n kwargs (dict): kwargs to be converted.\n \"\"\"\n normal_kwargs = dict()\n for kwarg in kwargs:\n if isinstance(kwargs[kwarg], BaseClass):\n normal_kwargs[kwarg] = kwargs[kwarg].generate_settings()\n elif isinstance(kwargs[kwarg], (list, tuple)):\n normal_kwargs[kwarg] = self.generate_list_settings(kwargs[kwarg])\n elif isinstance(kwargs[kwarg], dict):\n normal_kwargs[kwarg] = self.generate_kwarg_setting(kwargs[kwarg])\n else:\n normal_kwargs[kwarg] = kwargs[kwarg]\n \n return normal_kwargs\n\n\n def generate_settings(self):\n \"\"\"\n Generates settings for the instance of the BaseClass.\n\n Returns\n Settings in dictionary format.\n \"\"\"\n settings = {\n \"class\": self.__class__.__name__, \n **self.generate_kwarg_setting({kwarg: self.__dict__[kwarg] for kwarg in self.kwargs}), \n }\n return settings\n \n def save(self, path):\n \"\"\"\n Saves the generated settings into a JSON file at a specified path.\n \n Args:\n path (string): The file path at which the settings have to be saved.\n \"\"\"\n settings = self.generate_settings()\n\n if os.path.dirname(path) != \"\":\n os.makedirs(os.path.dirname(path), exist_ok=True)\n \n with open(path, \"w\") as f:\n json.dump(settings, f, indent=2)\n\n @classmethod\n def get_all_subclasses(cls):\n \"\"\"\n Returns all subclasses of the BaseClass.\n \"\"\"\n all_subclasses = []\n\n for subclass in cls.__subclasses__():\n all_subclasses.append(subclass)\n all_subclasses.extend(subclass.get_all_subclasses())\n\n return all_subclasses\n\n @staticmethod\n def find_class(cls_name):\n \"\"\"\n Searches for a class that matches the given class name.\n\n Args:\n cls_name (string): Class name to be matched\n \"\"\"\n for possible_cls in BaseClass.get_all_subclasses():\n if possible_cls.__name__ == cls_name:\n return possible_cls\n return None\n\n @staticmethod\n def load_from_list_settings(list_):\n \"\"\"\n Deserializes the list saved settings to instantiate the objects.\n\n Args:\n list_ (List): List of saved settings\n \"\"\"\n output_list = []\n for item in list_:\n if isinstance(item, dict):\n output_list.append(BaseClass.load_from_dict(item))\n elif isinstance(item, (tuple, list)):\n output_list.append(BaseClass.load_from_list_settings(item))\n else:\n output_list.append(item)\n\n return output_list\n \n @staticmethod\n def load_from_dict(dict_):\n \"\"\"\n Deserializes the dictionary saved settings to instantiate the 
objects.\n\n Args:\n dict_ (dict): Dictionary containing saved settings\n \"\"\"\n other_class = BaseClass.find_class(dict_.get(\"class\", None))\n if other_class is not None:\n return other_class.load_from_settings(dict_)\n \n output_dict = dict()\n for key in dict_:\n if isinstance(dict_[key], dict):\n output_dict[key] = BaseClass.load_from_dict(dict_[key])\n elif isinstance(dict_[key], (tuple, list)):\n output_dict[key] = BaseClass.load_from_list_settings(dict_[key])\n else:\n output_dict[key] = dict_[key]\n\n return output_dict\n\n @staticmethod\n def load_from_settings(settings):\n \"\"\"\n Deserializes the saved settings to instantiate the object.\n\n Args:\n settings (dict): Saved settings\n \"\"\"\n cls = BaseClass.find_class(settings[\"class\"])\n\n if cls is None:\n logger.error(f\"Could not find class {settings['class']} when loading class.\")\n return None\n\n kwargs = dict()\n for kwarg in settings:\n if kwarg == \"class\":\n continue\n if isinstance(settings[kwarg], dict):\n kwargs[kwarg] = BaseClass.load_from_dict(settings[kwarg])\n elif isinstance(settings[kwarg], (tuple, list)):\n kwargs[kwarg] = BaseClass.load_from_list_settings(settings[kwarg])\n else:\n kwargs[kwarg] = settings[kwarg]\n\n return cls(**kwargs)\n\n @classmethod\n def _load(cls, path, **kwargs):\n \"\"\"\n Loads the settings from the JSON file at the specified path.\n \n Args:\n path (string): The file path from which the settings have to be loaded.\n kwargs (dict): Additional keywords arguments\n \"\"\"\n with open(path, \"r\") as f:\n settings = json.load(f)\n for kwarg in kwargs:\n settings[kwarg] = kwargs[kwarg]\n return cls.load_from_settings(settings)\n\n @staticmethod\n def load(path, **kwargs):\n \"\"\"\n Loads the settings of the class from the JSON file.\n\n Args:\n path (string): The file path from which the class settings have to be loaded.\n kwargs (dict): Additional keywords arguments\n \"\"\"\n with open(path, \"r\") as f:\n settings = json.load(f)\n cls = BaseClass.find_class(settings[\"class\"])\n return cls._load(path, **kwargs)\n\n def __str__(self) -> str:\n \"\"\"\n Returns a string representation of the class object.\n \"\"\"\n return f\"{self.__class__.__name__}({self.kwargs})\"\n \n def __eq__(self, o: object) -> bool:\n \"\"\"\n Checks whether the provided object is equal to the current object.\n\n Args:\n o (object): Object to compare\n \"\"\"\n if not isinstance(o, BaseClass):\n return False\n \n other_settings = o.generate_settings()\n settings = self.generate_settings()\n\n return other_settings == settings" }, { "identifier": "CustomDataset", "path": "src/model_arithmetic/dataset.py", "snippet": "class CustomDataset(Dataset):\n \"\"\"\n A custom PyTorch Dataset class for tokenized sequence data.\n\n Uses a tokenizer to convert text data from a DataFrame to input_ids (tokens), \n and optionally attaches label data if present in the DataFrame.\n \"\"\"\n def __init__(self, tokenizer, df, max_tokens=128, min_tokens=1, random_cutoff=False):\n \"\"\"\n Initializes the CustomDataset object.\n\n Args:\n tokenizer (Tokenizer): The tokenizer to be used for the text data.\n df (pandas.DataFrame): DataFrame containing the text data, and optionally labels.\n max_tokens (int, optional): Maximum number of tokens per sequence. Defaults to 128.\n min_tokens (int, optional): Minimum number of tokens per sequence. Defaults to 1.\n random_cutoff (bool, optional): Whether to randomly cut off the number of tokens per sequence. 
Defaults to False.\n \"\"\"\n super().__init__()\n data = df.dropna()\n self.tokenized_dataset = [\n tokenizer(sentence, return_tensors=\"pt\", truncation=True, max_length=max_tokens).input_ids.view(-1) for sentence in tqdm(data[\"text\"].tolist())\n ]\n\n self.df = data\n self.has_labels = \"label\" in data.columns\n self.min_tokens = min_tokens\n self.labels = None\n if self.has_labels:\n self.labels = data[\"label\"].values\n \n self.random_cutoff = random_cutoff\n\n def __len__(self):\n \"\"\"\n Returns the length of the tokenized dataset, \n i.e., the number of tokenized sequences.\n \n Returns:\n int: Number of tokenized sequences.\n \"\"\"\n return len(self.tokenized_dataset)\n\n def __getitem__(self, idx):\n \"\"\"\n Fetches an item from the dataset at the given index.\n\n If labels are available, also fetches the associated label.\n If `random_cutoff` is true, may truncate sequence length randomly.\n\n Args:\n idx (int): Index of the required sequence.\n\n Returns:\n dict: A dictionary with the following structure-\n {\n \"input_ids\": torch.Tensor (Tokenized sequence),\n \"labels\": torch.Tensor (Associated label, if available)\n }\n \"\"\"\n cutoff = len(self.tokenized_dataset[idx])\n if self.random_cutoff:\n cutoff = torch.randint(min(cutoff, self.min_tokens), cutoff + 1, (1,)).item()\n \n if not self.has_labels:\n return {\"input_ids\": self.tokenized_dataset[idx][:cutoff]}\n else:\n return {\"input_ids\": self.tokenized_dataset[idx][:cutoff], \"labels\": torch.tensor([self.labels[idx]], dtype=torch.long)}" }, { "identifier": "load_model", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_model(dir_or_model, classification=False, token_classification=False, return_tokenizer=False, dtype=torch.bfloat16, load_dtype=True, \n rl=False, peft_config=None):\n \"\"\"\n This function is used to load a model based on several parameters including the type of task it is targeted to perform.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n\n classification (bool): If True, loads the model for sequence classification.\n\n token_classification (bool): If True, loads the model for token classification.\n\n return_tokenizer (bool): If True, returns the tokenizer along with the model.\n\n dtype: The data type that PyTorch should use internally to store the model’s parameters and do the computation.\n\n load_dtype (bool): If False, sets dtype as torch.float32 regardless of the passed dtype value.\n\n rl (bool): If True, loads model specifically designed to be used in reinforcement learning environment.\n\n peft_config: Configuration details for Peft models. 
\n \n Returns:\n It returns a model for the required task along with its tokenizer, if specified.\n \"\"\"\n log(logger.debug, f\"Loading model for {dir_or_model} with {classification}, {dtype}, {load_dtype}\")\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if not load_dtype:\n dtype = torch.float32\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n\n original_model_name = model_name\n\n if classification:\n model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\") # to investigate: calling torch_dtype here fails.\n elif token_classification:\n model = AutoModelForTokenClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n elif rl:\n model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, \n peft_config=peft_config, device_map=\"auto\")\n else:\n if model_name.endswith(\"GPTQ\") or model_name.endswith(\"GGML\"):\n model = AutoGPTQForCausalLM.from_quantized(model_name,\n use_safetensors=True,\n trust_remote_code=True,\n # use_triton=True, # breaks currently, unfortunately generation time of the GPTQ model is quite slow\n quantize_config=None, device_map=\"auto\")\n else:\n model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n\n if is_lora_dir:\n model = PeftModel.from_pretrained(model, dir_or_model)\n \n try:\n tokenizer = load_tokenizer(original_model_name)\n model.config.pad_token_id = tokenizer.pad_token_id\n except Exception:\n pass\n if return_tokenizer:\n return model, load_tokenizer(original_model_name)\n return model" }, { "identifier": "load_tokenizer", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_tokenizer(dir_or_model):\n \"\"\"\n This function is used to load the tokenizer for a specific pre-trained model.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n \n Returns:\n It returns a tokenizer that can convert text to tokens for the specific model input.\n \"\"\"\n log(logger.debug, f\"Loading tokenizer for {dir_or_model}\")\n\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n \n if os.path.isfile(os.path.join(dir_or_model, \"config.json\")):\n loaded_json = json.load(open(os.path.join(dir_or_model, \"config.json\"), \"r\"))\n model_name = loaded_json[\"_name_or_path\"]\n\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n if tokenizer.pad_token is None:\n log(logger.debug, \"Setting pad token to eos token\")\n tokenizer.pad_token = tokenizer.eos_token\n tokenizer.pad_token_id = tokenizer.eos_token_id\n \n return tokenizer" }, { "identifier": "ModelArithmetic", "path": "src/model_arithmetic/model_arithmetic.py", "snippet": "class ModelArithmetic(PreTrainedModel):\n \"\"\"\n Main class for prompt arithmetic. 
Handles the generation of text based on the formula.\n \"\"\"\n SAVE_FILE = \"prompt_arithmetic.json\"\n _supports_sdpa = True\n\n def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, \n retroactive_operators = [], calculate_statistics=True, needs_input_tokens_lm_eval=False, lm_eval_task=None, tokenizer=None):\n \"\"\"Initializes the prompt arithmetic model.\n\n Args:\n formula (Operator): The formula for which generations need to be made.\n default_model (str, optional): Default model for RunnableOperators that don't have a model associated with them. Defaults to None.\n dtype (torch.dtype, optional): Dtype of the models to load by default. Defaults to torch.bfloat16.\n intermediate_argmax (bool, optional): Something unimportant that was tried out, but now deprecated. Defaults to False.\n epsilon (float, optional): Just some small value. Defaults to 1e-12.\n retroactive_operators (list, optional): The retroactive operators that need to be applied. Defaults to [].\n calculate_statistics (bool, optional): Whether or not to calculate some statistics, can be a tad bit expensive. Defaults to True.\n needs_input_tokens_lm_eval (bool, optional): Whether or not lm eval is used and whether or not the task needs the input tokens. Defaults to False. Only set to true for an lm eval task.\n lm_eval_task (str, optional): Name of the lm eval task. Defaults to None.\n tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase, optional): Tokenizer to use. Defaults to None.\n \"\"\"\n self.formula = formula.clone()\n\n self.default_model = default_model\n self.loaded_models = dict()\n self.model_prediction_history = [] # keeps track of the RunnableOperators predictions for each token (that hasn't finished computing)\n self.logprobs_history = [] # keeps track of the current probability distribution for which each token has been drawn\n self.model_last_token_prediction = [] # keeps track of the last token that has been predicted for each RunnableOperator\n \n self.output_type = namedtuple(\"ModelArithmeticOutput\", [\"logits\", \"logprobs_per_model\"])\n self.intermediate_argmax = intermediate_argmax\n self.retroactive_operators = retroactive_operators\n self.calculate_statistics = calculate_statistics\n\n self.runnable_operators = []\n for runnable_operator in self.formula.runnable_operators():\n if not any([runnable_operator.same_operator(output) for output in self.runnable_operators]):\n self.runnable_operators.append(runnable_operator)\n \n\n # sort the prompts by speculative factor, putting the one with highest speculative factor first\n # => run model with highest speculative factor first, since otherwise the computation might be wasted for the first ones\n # however, we first need to sort by run_priority and then within that by speculative factor\n self.runnable_operators = sorted(self.runnable_operators, key=lambda runnable_operator: (runnable_operator.run_priority, runnable_operator.speculative_factor), reverse=True)\n \n self.load_all_models(dtype=dtype)\n if self.default_model not in self.loaded_models:\n for runnable_operator in self.runnable_operators:\n if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:\n self.default_model = runnable_operator.model\n break\n if self.default_model is None:\n raise ValueError(\"Default model must be specified if not specified in an llm prompt\")\n\n self.config = self.loaded_models[str(self.default_model)].config\n\n if 
tokenizer is None:\n self.tokenizer = load_tokenizer(self.default_model)\n else:\n self.tokenizer = tokenizer\n \n self.init_runnable_operators()\n \n self.model_input_tokens = {\n runnable_operator.id(): TokenizedInput(runnable_operator, \n runnable_operator.model, \n self.loaded_models[str(runnable_operator.model)].config,\n self.tokenizer) \n for runnable_operator in self.runnable_operators\n }\n \n self.init_monitor()\n \n self.epsilon = epsilon\n \n self.word_size = len(self.tokenizer)\n \n if Compatibility is not None:\n self.lm_eval_compatibility = Compatibility(\n task_name=lm_eval_task,\n needs_input_tokens_lm_eval=needs_input_tokens_lm_eval,\n tokenizer=self.tokenizer,\n device=self.device,\n max_length=get_max_length(self.config),\n )\n else:\n self.lm_eval_compatibility = None\n \n super().__init__(self.config)\n \n def init_monitor(self):\n \"\"\"\n Initializes the monitor for the prompt arithmetic model.\n \"\"\"\n self.monitor = Monitor(self.runnable_operators)\n \n def init_runnable_operators(self):\n \"\"\"Initializes the runnable operators. This is done after the models have been loaded, because the models are needed for the runnable operators.\n \"\"\"\n for runnable_operator in self.runnable_operators:\n if runnable_operator.model is None:\n runnable_operator.model = self.default_model\n runnable_operator.initialize_after_model_set()\n\n def load_all_models(self, dtype=torch.bfloat16):\n \"\"\"Loads all the models that are needed for the runnable operators. Models are never loaded twice.\n\n Args:\n dtype (torch.dtype, optional): Default Dtype of the models. Defaults to torch.bfloat16.\n \"\"\"\n if self.default_model is None:\n for runnable_operator in self.runnable_operators:\n if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:\n self.default_model = str(runnable_operator.model)\n break\n \n for runnable_operator in self.runnable_operators:\n if runnable_operator.model is None:\n assert self.default_model is not None, \"Default model must be specified if not specified in prompt\"\n runnable_operator.model = self.default_model\n if runnable_operator.model not in self.loaded_models:\n model = runnable_operator.load_model(dtype=dtype)\n model.eval()\n if model is not None:\n self.loaded_models[str(runnable_operator.model)] = model\n \n if len(self.loaded_models) == 0:\n assert self.default_model is not None, \"Required to at least have one model, for now\"\n self.loaded_models[str(self.default_model)] = load_model(self.default_model, dtype=dtype)\n \n @property\n def device(self):\n \"\"\"Device of the default model. Needed for compatibility with lm_eval\n\n Returns:\n torch.device: Device of the default model.\n \"\"\"\n return self.loaded_models[str(self.default_model)].device\n\n def save_pretrained(self, path : str):\n \"\"\"Saves the model to the specified path.\n\n Args:\n path (str): Path to which to save the model\n \"\"\"\n os.makedirs(path, exist_ok=True)\n all_settings = {\n \"formula\": self.formula.generate_settings(),\n \"default_model\": self.default_model,\n }\n\n with open(os.path.join(path, self.SAVE_FILE), \"w\") as f:\n json.dump(all_settings, f, indent=4, sort_keys=True)\n\n @classmethod\n def from_pretrained(cls, path : str, dtype=torch.bfloat16):\n \"\"\"Loads the model from the specified path.\n\n Args:\n path (str): Path from which to load the model\n dtype (torch.dtype, optional): Default dtype for the models. 
Defaults to torch.bfloat16.\n\n Returns:\n ModelArithmetic: model arithmetic model\n \"\"\"\n with open(os.path.join(path, cls.SAVE_FILE), \"r\") as f:\n all_settings = json.load(f)\n all_settings[\"formula\"] = Operator.load_from_settings(all_settings[\"formula\"])\n return cls(**all_settings, dtype=dtype)\n\n \n def forward_model(self, runnable_operator, continuation_tokens, model_new_tokens=None, use_cache=False, do_speculation=False):\n \"\"\"Runs a specifc runnable operator on the continuation tokens.\n\n Args:\n runnable_operator (RunnableOperator): The runnable operator to run.\n continuation_tokens (list[list[int]]): List of tokens that need to be continued. The prompt is not included in these tokens\n model_new_tokens (list[int], optional): New tokens for the model. Defaults to None.\n use_cache (bool, optional): Whether or not to allow the model to use cache (eg key-value storage for an LLM). Defaults to False.\n do_speculation (bool, optional): Whether or not to do speculation sampling. Defaults to False.\n\n Returns:\n torch.tensor: logprobs of the model, one logprob distribution for each new token in each sample\n \"\"\"\n start_time = time.time()\n \n tokenized_input_creator = self.model_input_tokens[runnable_operator.id()]\n tokenized_inputs = tokenized_input_creator.add_continuation_tokens(continuation_tokens)\n tokenized_only_input = tokenized_input_creator.get_only_input_tokens()\n \n was_none = model_new_tokens is None\n \n if was_none:\n model_new_tokens = torch.tensor([len(continuation_tokens[i]) + 1 for i in range(len(continuation_tokens))])\n \n if len(self.model_prediction_history) < len(continuation_tokens):\n new_prediction_history = [dict() for _ in range(len(continuation_tokens))]\n else:\n new_prediction_history = [self.model_prediction_history[i].get(self.max_index_prediction_history(i), dict()) for i in range(len(continuation_tokens))]\n \n logprobs = runnable_operator.run(\n loaded_models=self.loaded_models,\n tokenized_inputs=tokenized_inputs,\n model_new_tokens=model_new_tokens,\n new_prediction_history=new_prediction_history,\n other_tokenizer=self.tokenizer,\n tokenized_only_input=tokenized_only_input, \n use_cache=use_cache,\n do_speculation=do_speculation\n )\n \n logprobs = [logprob.to(self.device) for logprob in logprobs]\n \n if was_none:\n logprobs = torch.stack(logprobs, dim=0)\n\n self.monitor.add_result(element=time.time() - start_time, runnable_operator=runnable_operator)\n return logprobs\n \n def group_complete(self, model_history):\n \"\"\"Checks which groups of runnable operators have been completely calculated and which haven't.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n dict[bool]: Dict mapping the group to whether it has been completely calculated or not\n \"\"\"\n # everything that is a group needs to be either all calculated or all not calculated\n group_calculated = dict()\n groups = set([runnable_operator.group for runnable_operator in self.runnable_operators if runnable_operator.group is not None])\n completed_groups = {group: True for group in groups}\n \n for runnable_operator in self.runnable_operators:\n if runnable_operator.group is not None:\n is_calculated = model_history.get(runnable_operator.id()) is not None\n if runnable_operator.group not in group_calculated:\n group_calculated[runnable_operator.group] = is_calculated\n elif group_calculated[runnable_operator.group] != is_calculated:\n completed_groups[runnable_operator.group] = False\n return 
completed_groups\n \n def group_model_history(self, model_history):\n \"\"\"Sets the model history on which to evaluate the formula based on the groups. Removes predictions if the group hasn't been completely calculated yet.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n dict: Adjusted dict mapping\n \"\"\"\n completed_groups = self.group_complete(model_history)\n grouped_model_history = dict()\n for runnable_operator in self.runnable_operators:\n if runnable_operator.group is None or completed_groups[runnable_operator.group]:\n grouped_model_history[runnable_operator.id()] = model_history[runnable_operator.id()]\n else:\n grouped_model_history[runnable_operator.id()] = None\n \n return grouped_model_history\n \n def create_sample_logprobs(self, logprobs, temperature, top_k, top_p):\n \"\"\"Creates the logprobs for each token in each sample.\n\n Args:\n logprobs (torch.tensor): Logprobs of the model\n temperature (float): temperature to use\n top_k (int): top_k to use\n top_p (float): top_p to use\n\n Returns:\n torch.tensor: Logprobs for each token in each sample\n \"\"\"\n if temperature == 0:\n logprobs_argmax = torch.argmax(logprobs, dim=-1)\n logprobs = torch.nn.functional.one_hot(logprobs_argmax, num_classes=logprobs.shape[-1]).float()\n return logprobs\n logprobs = logprobs / temperature\n logprobs = top_k_top_p_filtering(logprobs.unsqueeze(0), top_k=top_k, top_p=top_p)\n return torch.softmax(logprobs, dim=-1).squeeze()\n \n \n\n def process_logprobs(self, model_history):\n \"\"\"Processes model history to get the probability distribution for the token.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n _type_: _description_\n \"\"\"\n init_time = time.time()\n logprobs_normalized = self.formula.evaluate(model_history)\n self.monitor.add_result(element=time.time() - init_time, indicator=\"formula_evaluation\")\n if not torch.is_tensor(logprobs_normalized):\n return None\n # logprobs_normalized = logprobs_normalized / temperature\n # logprobs_normalized = top_k_top_p_filtering(logprobs_normalized.unsqueeze(0), top_k=top_k, top_p=top_p)\n return logprobs_normalized\n \n def run_retroactive_operators(self, index, tokenized_sentence, temperature, top_k, top_p):\n \"\"\"Runs the retroactive operators on the tokenized sentence. 
\n\n Args:\n index (int): Index of the sentence in the current batch\n tokenized_sentence (list[int]): Tokenized sentence\n temperature (float): temperature to use\n top_k (int): top_k to use\n top_p (float): top_p to use\n\n Returns:\n list[int]: Adjusted tokenized sentence based on the retroactive operators and whether they accepted it.\n \"\"\"\n for operator in self.retroactive_operators:\n accepted = operator.accept(tokenized_sentence, self.tokenizer)\n if accepted < 0:\n not_accepted_token = tokenized_sentence[accepted]\n self.clear_model_prediction_history(index, tokenized_sentence, from_=len(tokenized_sentence) + accepted)\n tokenized_sentence = tokenized_sentence[:len(tokenized_sentence) + accepted]\n \n self.logprobs_history[index][len(tokenized_sentence)][not_accepted_token] = -torch.inf\n \n if torch.all(self.logprobs_history[index][len(tokenized_sentence)] == -torch.inf):\n self.logprobs_history[index][len(tokenized_sentence)] = torch.zeros_like(self.logprobs_history[index][len(tokenized_sentence)])\n \n probs_to_sample = self.create_sample_logprobs(\n self.logprobs_history[index][len(tokenized_sentence)],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p\n )\n new_token = torch.multinomial(probs_to_sample, 1).item()\n \n tokenized_sentence.append(new_token)\n return self.run_retroactive_operators(index, tokenized_sentence, temperature, top_k, top_p)\n \n return tokenized_sentence\n \n def speculation_sample(self, token, previous_models_probs, new_models_probs):\n \"\"\"Sample a token based on the previous and new model probabilities in the speculative sampling way. Also returns whether the token was accepted or not.\n\n Args:\n token (int): Token that is currently selected\n previous_models_probs (torch.tensor): Model probabilities of the previous models\n new_models_probs (torch.tensor): Model probabilities of the new models\n\n Returns:\n (int, bool): New token and whether or not the input token was accepted\n \"\"\"\n acceptance_prob = torch.minimum(torch.tensor(1.0), new_models_probs[token] / (previous_models_probs[token] + torch.tensor(self.epsilon)))\n # TODO: the next line is taking an enormous amount of time because of asynchronous computing on gpu's and requiring it to be returned immediately\n # Therefore do batch processing\n acceptance_prob = float(acceptance_prob)\n self.monitor.add_result(element=float(acceptance_prob), indicator=\"acceptance_prob\")\n # self.monitor.add_result(element=self.entropy(previous_models_probs).item(), indicator=\"entropy_previous\")\n # self.monitor.add_result(element=previous_models_probs[token].item(), indicator=\"probability_previous\")\n\n if torch.rand(1) < acceptance_prob:\n return token, True\n else:\n new_proba_distrib = torch.relu(new_models_probs - previous_models_probs)\n new_proba_distrib /= torch.sum(new_proba_distrib)\n new_token = torch.multinomial(new_proba_distrib, 1).item()\n return new_token, False\n \n \n def add_new_result(self, generated_tokens, num_new_tokens, runnable_operator, new_model_logprobs, top_p, top_k, temperature):\n \"\"\"Adds a new run of a runnable operator to the model prediction history. 
Also does speculation sampling if needed.\n\n Args:\n generated_tokens (list[list[int]]): Currently generated tokens by the model\n num_new_tokens (list[int]): Number of new tokens for each sample in the batch\n runnable_operator (RunnableOperator): Runnable operator that was run\n new_model_logprobs (List[torch.tensor]): Output of the run function of the runnable operator\n top_p (flaot): top_p to use\n top_k (int): top_k to use\n temperature (float): temperature to use\n\n Returns:\n list[bool]: For each sample in the batch, whether all tokens in that sample were kept or not\n \"\"\"\n all_kept = []\n for i in range(len(generated_tokens)):\n n_generated_tokens = len(generated_tokens[i])\n kept = True\n for n_token in range(n_generated_tokens - num_new_tokens[i] + 1, n_generated_tokens + 1):\n # initialize the model prediction history\n self.model_prediction_history[i][n_token] = self.model_prediction_history[i].get(n_token, \n {runnable_operator.id(): None for runnable_operator in self.runnable_operators})\n # check if we need to do speculation sampling, only needed when a previous token was sampled\n do_speculation_sample = n_token < n_generated_tokens\n \n # speculation sampling not needed if the model was run before \n if self.model_prediction_history[i][n_token][runnable_operator.id()] is not None:\n do_speculation_sample = False\n \n # speculation sampling not needed if all models have not been run yet: this is the first model on this token\n if all([logprob is None for logprob in self.model_prediction_history[i][n_token].values()]):\n do_speculation_sample = False\n # This means that this token was already fully accepted, so we can just continue (can happen if batch_size > 1 or when end is triggered)\n if self.max_index_prediction_history(i) > n_token:\n continue\n \n # add the new model logprobs\n self.model_prediction_history[i][n_token][runnable_operator.id()] = new_model_logprobs[i][-n_generated_tokens + n_token + num_new_tokens[i] - 1]\n \n group_model_history = self.group_model_history(self.model_prediction_history[i][n_token])\n # group_model_history needs to be separately checked, since it could be that the group is not yet fully calculated\n # also allow no logprobs runnable operators (would lead to errors) if the formula is not finished yet (if it is finished, you need to)\n if all([logprob is None for logprob in group_model_history.values()]) or (not runnable_operator.outputs_logprobs and not self.formula.is_finished(group_model_history)):\n continue\n \n # process the logprobs\n new_model_probs = self.process_logprobs(group_model_history)\n \n if self.intermediate_argmax and not self.formula.is_finished(group_model_history):\n argmax_el = torch.argmax(new_model_probs)\n new_model_probs = torch.zeros_like(new_model_probs)\n new_model_probs[argmax_el] = 1.0\n \n if do_speculation_sample:\n if self.calculate_statistics:\n self.monitor.add_result(self.expected_acceptance_prob(self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), \n self.create_sample_logprobs(self.logprobs_history[i].get(n_token), temperature, top_k, top_p)), \n indicator=\"expected_acceptance_prob\", runnable_operator=runnable_operator)\n\n new_token, kept = self.speculation_sample(\n token = generated_tokens[i][n_token],\n previous_models_probs=self.create_sample_logprobs(self.logprobs_history[i][n_token], temperature, top_k, top_p),\n new_models_probs=self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), \n )\n if n_token in self.model_prediction_history[i]:\n 
self.logprobs_history[i][n_token] = new_model_probs\n \n if not kept:\n # if not kept, we change the generated tokens and remove the model prediction history after that token\n generated_tokens[i][n_token] = new_token\n generated_tokens[i] = generated_tokens[i][:n_token + 1]\n self.clear_model_prediction_history(i, generated_tokens[i], from_=n_token)\n self.trigger_end[i] = False\n \n elif n_token in self.model_prediction_history[i]:\n self.logprobs_history[i][n_token] = new_model_probs\n \n if not kept:\n break\n \n all_kept.append(kept)\n return all_kept\n \n\n def clear_model_prediction_history(self, index, generated_tokens_index, from_=-1):\n \"\"\"Clears the model prediction history for a specific sample in the batch. First deletes all history of finished tokens, then \n deletes history of tokens that were prediction, but then got removed because of speculation\n\n Args:\n index (int): index of the sample in the batch\n generated_tokens_index (list[int]): Generated tokens at the index\n from_ (int, optional): From which token to delete all the history. Defaults to -1.\n \"\"\"\n all_indices = list(self.model_prediction_history[index].keys())\n for token in all_indices:\n all_none = all([logprob is None for logprob in self.model_prediction_history[index][token].values()])\n finished = self.formula.is_finished(self.model_prediction_history[index][token])\n if all_none or finished or (from_ != -1 and token > from_):\n if finished and len(generated_tokens_index) > token and self.calculate_statistics:\n self.add_monitor_token_probs(generated_tokens_index[token], self.model_prediction_history[index][token], self.logprobs_history[index].get(token))\n \n if finished:\n for model_index in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[model_index][index] = max(token + 1, self.model_last_token_prediction[model_index][index])\n \n del self.model_prediction_history[index][token]\n \n if from_ > -1:\n for model_index in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[model_index][index] = min(from_ + 1, self.model_last_token_prediction[model_index][index])\n \n def max_index_prediction_history(self, index):\n \"\"\"Gets the max index of the model prediction history for a specific runnable operator \n\n Args:\n index (int): index of runnable operator in the list of runnable operators\n\n Returns:\n int: max index of its prediction\n \"\"\"\n keys = list(self.model_prediction_history[index].keys())\n if len(keys) == 0:\n return 0\n return max(self.model_prediction_history[index].keys())\n\n def normal_sample(self, probs):\n \"\"\"Samples from a probability distribution\n\n Args:\n probs (torch.tensor): Probability distribution\n\n Returns:\n int: Sampled token\n \"\"\"\n out = torch.multinomial(probs, 1)\n return out\n \n def KL_divergence(self, p, q):\n \"\"\"Compuates KL divergence between two probability distributions\n\n Args:\n p (torch.tensor): probability distribution\n q (torch.tensor): probability distribution\n\n Returns:\n float: KL divergence\n \"\"\"\n return torch.sum(p * torch.log((p + self.epsilon) / (q + self.epsilon)))\n \n def entropy(self, p):\n \"\"\"Computes entropy of a probability distribution\n\n Args:\n p (torch.tensor): probability distribution\n\n Returns:\n float: entropy\n \"\"\"\n return -torch.sum(p * torch.log(p + self.epsilon))\n \n def expected_acceptance_prob(self, p, q):\n \"\"\"\n Calculates the expected acceptance probability of speculative sampling.\n \n Args:\n p (torch.tensor): probability 
distribution\n q (torch.tensor): probability distribution\n \"\"\"\n return 1 - 1 / 2 * torch.sum(torch.abs(q - p)).item()\n \n def add_monitor_token_probs(self, token, history, history_logprobs):\n \"\"\"Adds some token probabilities to the monitor\n\n Args:\n token (int): Samples token\n history (dict): Model prediction history at the specific index where the token was drawn from\n history_logprobs (torch.tensor): LogProbability distribution from which the token was drawn.\n \"\"\"\n for runnable_operator in self.runnable_operators:\n if runnable_operator.is_finished(history) and runnable_operator.outputs_logprobs:\n evaluated = runnable_operator.evaluate(history)\n self.monitor.add_result(element=torch.softmax(evaluated, dim=-1)[token].item(), runnable_operator=runnable_operator, indicator=\"token_prob\")\n # add logprob as well\n self.monitor.add_result(element=max(evaluated[token].item(), np.log(self.epsilon)), runnable_operator=runnable_operator, indicator=\"token_logprob\")\n # add KL divergence\n if history_logprobs is not None:\n self.monitor.add_result(element=self.KL_divergence(torch.softmax(history_logprobs, dim=-1), torch.softmax(evaluated, dim=-1)).item(), \n runnable_operator=runnable_operator, indicator=\"KL_divergence\")\n \n self.monitor.add_result(element=self.entropy(torch.softmax(history_logprobs, dim=-1)).item(), indicator=\"entropy\")\n\n def next_token_speculative(self, continuation_tokens, \n top_p=1.0, top_k=0, temperature=1.0, speculation=True, use_cache=True):\n \"\"\"Continues one step in the generation process by running the runnable operators that need to be run and then sampling from the probability distribution.\n\n Args:\n continuation_tokens (list[list[int]]): Current continuation tokens\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n temperature (float, optional): temperature to use. Defaults to 1.0.\n speculation (bool, optional): Whether to use speculation. Defaults to True.\n use_cache (bool, optional): Whether to use cache. 
Defaults to True.\n\n Returns:\n _type_: _description_\n \"\"\"\n models_ran = []\n for i, runnable_operator in enumerate(self.runnable_operators):\n new_tokens = [len(continuation_tokens[j]) - self.model_last_token_prediction[i][j] + 1 for j in range(len(continuation_tokens))]\n if runnable_operator.run_condition(new_tokens, self.trigger_end) or not speculation:\n logprobs = self.forward_model(runnable_operator, continuation_tokens, model_new_tokens=new_tokens, use_cache=use_cache, do_speculation=speculation)\n all_kept = self.add_new_result(continuation_tokens, new_tokens, runnable_operator, logprobs, top_p, top_k, temperature)\n models_ran.append(i)\n \n self.model_last_token_prediction[i] = [len(continuation_tokens[j]) + int(all_kept[j])\n for j in range(len(continuation_tokens))]\n \n if not all(all_kept):\n break\n \n to_sample_indices = [i for i in range(len(continuation_tokens)) if all_kept[i] and not self.trigger_end[i]]\n\n if len(to_sample_indices) > 0:\n # do batch sampling\n all_required_histories = torch.stack([\n self.create_sample_logprobs(\n self.logprobs_history[i][len(continuation_tokens[i])], \n temperature=temperature,\n top_k=top_k,\n top_p=top_p\n ) for i in to_sample_indices\n ])\n new_tokens = self.normal_sample(all_required_histories)\n for i in range(len(to_sample_indices)):\n continuation_tokens[to_sample_indices[i]].append(new_tokens[i].item())\n\n for i in models_ran:\n self.model_last_token_prediction[i] = [len(continuation_tokens[j]) for j in range(len(continuation_tokens))]\n return continuation_tokens\n\n def __call__(self, input_ids, **kwargs):\n \"\"\"Runs the forward pass of the model. This is needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n\n Returns:\n namedtuple: Named tuple of the ModelArithmetic model\n \"\"\"\n return self.forward(input_ids, **kwargs)\n \n def forward(self, input_ids, normalize=True, **kwargs):\n \"\"\"Runs the foward pass. This is needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n normalize (bool, optional): Whether or not to normalize the output. 
Defaults to True.\n\n Returns:\n namedtuple: Named tuple of the ModelArithmetic model\n \"\"\"\n ### this is a bit cheeky, but in order to be compatible with lm-evaluation-harness, we need to implement this method\n logprobs_per_model = {runnable_operator.id(): None for runnable_operator in self.runnable_operators}\n if not isinstance(input_ids, list):\n input_shape = input_ids.shape\n continuation_tokens = self.lm_eval_compatibility.forward_preprocessing(input_ids, self.model_input_tokens)\n else:\n input_shape = None\n continuation_tokens = input_ids\n\n for runnable_operator in self.runnable_operators:\n logprobs = self.forward_model(runnable_operator, continuation_tokens)\n if input_shape is not None:\n logprobs = self.lm_eval_compatibility.forward_post_processing(logprobs, input_shape)\n logprobs_per_model[runnable_operator.id()] = logprobs\n\n output = self.formula.evaluate(logprobs_per_model, normalize=normalize)\n return [output]\n\n def get_decoded_tokens(self, next_tokens_batch):\n \"\"\"Gets decoded tokens from the next tokens\n\n Args:\n next_tokens_batch (list[list[int]]): New tokens for each sample in the batch\n\n Returns:\n list[str]: Decoded tokens\n \"\"\"\n # adding eos token for compatibility with sentencepiece tokenizer\n encoded_sentences = [[self.tokenizer.eos_token_id] + next_tokens for next_tokens in next_tokens_batch]\n decoded_sentences = [self.tokenizer.decode(encoded_sentence, add_special_tokens=False) for encoded_sentence in encoded_sentences]\n decoded_next_tokens = [decoded_sentence[len(self.tokenizer.eos_token):] for decoded_sentence in decoded_sentences]\n return decoded_next_tokens\n \n def clear_memory(self):\n \"\"\"Deletes all loaded models and clears the cache\n \"\"\"\n for runnable_operator in self.runnable_operators:\n runnable_operator.delete_cache()\n self.loaded_models = dict()\n torch.cuda.empty_cache()\n\n def generate_text(self, sentences, max_length=1024, stop_texts=None, batch_size=None,\n temperature=1.0, top_p=1.0, top_k=0, num_return_sequences=1, do_speculation=False, use_cache=True, **kwargs):\n \"\"\"Generates text based on the input params\n\n Args:\n sentences (list[str]): List of input sentences\n max_length (int, optional): Max generation length. Defaults to 128.\n stop_texts (list[str], optional): Strings at which to stop generation. Defaults to None.\n batch_size (int, optional): Batch size. Defaults to None (all at once).\n temperature (float, optional): temperature to use. Defaults to 1.0.\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n num_return_sequences (int, optional): Number of return sequences per sentence. Defaults to 1.\n do_speculation (bool, optional): Whether or not to do speculation. Defaults to True.\n use_cache (bool, optional): Whether or not to use cache. 
Defaults to True.\n\n Returns:\n list[str]: List of generated texts\n \"\"\"\n assert not do_speculation or any([runnable_operator.speculative_factor == 1 for runnable_operator in self.runnable_operators])\n if isinstance(sentences, str):\n sentences = [sentences]\n if batch_size is None:\n batch_size = len(sentences)\n \n # duplicate each sentence num_return_sequences times, but keep the same sentences next to each other\n sentences = [sentence for sentence in sentences for _ in range(num_return_sequences)]\n\n self.model_prediction_history = [dict() for _ in range(batch_size)]\n self.logprobs_history = [dict() for _ in range(batch_size)]\n self.model_last_token_prediction = [[0 for _ in range(batch_size)] for _ in range(len(self.runnable_operators))]\n self.trigger_end = [False for _ in range(batch_size)]\n self.init_monitor()\n \n if stop_texts is None:\n stop_texts = []\n stop_texts.append(self.tokenizer.eos_token)\n\n start_sentences = sentences[:]\n\n log(logger.debug, f\"Generating {len(sentences)} sentences\")\n\n generated_texts = [\"\" for _ in range(len(sentences))]\n generated_tokens = [[] for _ in range(len(sentences))]\n current_indices = [i for i in range(0, min(len(sentences), batch_size))]\n next_index = len(current_indices)\n \n for runnable_operator_id in self.model_input_tokens:\n self.model_input_tokens[runnable_operator_id].set_inputs([start_sentences[index] for index in current_indices])\n \n total_done = 0\n while len(current_indices) > 0:\n start_time = time.time()\n generated_tokens_batch = [generated_tokens[index] for index in current_indices]\n next_tokens = self.next_token_speculative(generated_tokens_batch, top_p, top_k, \n temperature, speculation=do_speculation, use_cache=use_cache)\n for i in range(len(next_tokens)):\n next_tokens[i] = self.run_retroactive_operators(i, next_tokens[i], temperature, top_k, top_p)\n self.clear_model_prediction_history(i, next_tokens[i])\n decoded_tokens = self.get_decoded_tokens(next_tokens)\n\n for i, index in enumerate(current_indices):\n generated_tokens[index] = next_tokens[i]\n generated_texts[index] = decoded_tokens[i]\n\n indices_to_remove = []\n for i in range(len(current_indices)):\n sentences[current_indices[i]] = start_sentences[current_indices[i]] + generated_texts[current_indices[i]]\n if any([stop_text in generated_texts[current_indices[i]] for stop_text in stop_texts]) or len(generated_tokens[current_indices[i]]) >= max_length:\n if len(self.model_prediction_history[i]) == 0:\n indices_to_remove.append(i)\n else:\n self.trigger_end[i] = True\n \n for i in indices_to_remove[::-1]:\n self.monitor.add_result(element=len(generated_tokens[current_indices[i]]), indicator=\"length\")\n del current_indices[i]\n self.model_prediction_history = self.model_prediction_history[:i] + self.model_prediction_history[i + 1:]\n self.logprobs_history = self.logprobs_history[:i] + self.logprobs_history[i + 1:]\n for j in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[j] = self.model_last_token_prediction[j][:i] + self.model_last_token_prediction[j][i + 1:]\n self.trigger_end = self.trigger_end[:i] + self.trigger_end[i + 1:]\n \n for runnable_operator in self.runnable_operators:\n runnable_operator.delete_cache(index=i)\n\n if next_index < len(sentences):\n current_indices.append(next_index)\n self.model_prediction_history.append(dict())\n self.logprobs_history.append(dict())\n self.trigger_end.append(False)\n \n for j in range(len(self.model_last_token_prediction)):\n 
self.model_last_token_prediction[j].append(0)\n \n next_index += 1\n total_done += 1\n if total_done % 30 == 0:\n log(logger.debug, f\"Progress: {total_done / len(sentences):.3f}\")\n \n for runnable_operator_id in self.model_input_tokens:\n self.model_input_tokens[runnable_operator_id].set_inputs([start_sentences[index] for index in current_indices])\n\n self.monitor.add_result(element=time.time() - start_time)\n \n return generated_texts\n\n def generate(self, input_ids, attention_mask=None, do_sample=False, max_new_tokens=1024, \n stopping_criteria=None, temperature=1.0, top_p=1.0, top_k=0, use_cache=True, eos_token_id=None, pad_token_id=None, **kwargs):\n \"\"\"Generates text based on the input params. Needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n attention_mask (torch.tensor, optional): attention mask. Defaults to None.\n do_sample (bool, optional): Whether or not to sample. Defaults to False.\n max_new_tokens (int, optional): Max new number of tokens. Defaults to 128.\n stopping_criteria (_type_, optional): Stopping criteria to use. Defaults to None.\n temperature (float, optional): Temperature to. Defaults to 1.0.\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n use_cache (bool, optional): Whether or not to use cache. Defaults to True.\n eos_token_id (int, optional): eos token id. Defaults to None.\n pad_token_id (int, optional): pad token id. Defaults to None.\n\n Returns:\n list[str]: Generated texts\n \"\"\"\n if not do_sample:\n top_k = 1\n \n batch_size = input_ids.shape[0]\n input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids]\n stopping_sequences = [self.tokenizer.eos_token]\n if stopping_criteria is not None:\n stopping_sequences += [criteria.sequence for criteria in stopping_criteria]\n if eos_token_id is not None:\n stopping_sequences += [self.tokenizer.decode([eos_token_id])]\n \n texts = self.generate_text(input_texts, max_length=max_new_tokens, stop_texts=stopping_sequences,\n batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, use_cache=use_cache)\n encoded_texts = self.tokenizer.batch_encode_plus(texts, add_special_tokens=False, return_tensors=\"pt\").input_ids.to(self.device)\n # concatenate the input_ids with the encoded_texts\n all_encoded = torch.cat([input_ids, encoded_texts], dim=-1)\n return all_encoded" }, { "identifier": "ENABLE_LOGGING", "path": "src/model_arithmetic/utils.py", "snippet": "ENABLE_LOGGING = False" }, { "identifier": "log", "path": "src/model_arithmetic/utils.py", "snippet": "def log(function, message):\n \"\"\"\n Logs the given message using the provided function if logging is enabled.\n \n Parameters:\n function (callable): The logging function to use.\n message (str): The message to be logged.\n \"\"\"\n if ENABLE_LOGGING:\n function(message)" } ]
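The speculation_sample snippet above applies the standard speculative-decoding accept/resample rule: keep the drafted token with probability min(1, q[t]/p[t]), otherwise resample from the renormalised residual max(q - p, 0). A minimal standalone sketch of that rule, assuming PyTorch is available; the helper name and the toy distributions below are invented for illustration only.

import torch

def accept_or_resample(token, draft_probs, target_probs, eps=1e-12):
    # Accept the drafted token with probability min(1, q[t] / p[t]), where p is the
    # draft ("previous models") distribution and q the target ("new models") one.
    accept_prob = torch.minimum(torch.tensor(1.0),
                                target_probs[token] / (draft_probs[token] + eps))
    if torch.rand(1).item() < accept_prob.item():
        return token, True
    # On rejection, resample from the residual distribution max(q - p, 0), renormalised.
    residual = torch.clamp(target_probs - draft_probs, min=0.0)
    residual = residual / residual.sum()
    return torch.multinomial(residual, 1).item(), False

# Toy 4-token vocabulary where the target model shifts mass towards token 3.
p = torch.tensor([0.4, 0.3, 0.2, 0.1])
q = torch.tensor([0.1, 0.2, 0.3, 0.4])
print(accept_or_resample(0, p, q))  # token 0 is kept with probability 0.1 / 0.4 = 0.25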
from .base import BaseClass from loguru import logger from transformers import set_seed, Trainer, TrainingArguments, DataCollatorWithPadding from .dataset import CustomDataset from sklearn.model_selection import train_test_split from .basic_model_loader import load_model, load_tokenizer from .model_arithmetic import ModelArithmetic from googleapiclient import discovery from dotenv import load_dotenv from torch.utils.data import DataLoader from .utils import ENABLE_LOGGING, log from lm_eval import evaluator import pandas as pd import numpy as np import torch import os import json import time
16,098
scores.append(value) time.sleep(0.1) result = np.mean(scores) log(logger.info, f"Perspective is {result:.6f}") self.output["perspective"] = { "mean": result, "std": np.std(scores), } return result def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs): """ Generates samples from the model. Args: max_tokens (int): The maximum number of tokens to be used for evaluation. batch_size (int, optional): The batch size to be used for evaluation. temperature (float, optional): The temperature to be used for sampling. top_p (float, optional): The top-p value to be used for sampling. top_k (int, optional): The top-k value to be used for sampling. stop_texts (list, optional): The list of texts at which sampling should be stopped speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. """ start_time = time.time() if "generated" not in self.dataset.columns: texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts, do_speculation=speculation) self.dataset["generated"] = texts end_time = time.time() self.output["time"] = { "total_time": end_time - start_time, "time_per_sample": (end_time - start_time) / len(self.dataset), "dataset_size": len(self.dataset), "max_tokens": max_tokens, "batch_size": batch_size } def save_generated(self, output_location): """ Saves the generated samples to the specified location. Args: output_location (string): The location to save the generated samples. """ log(logger.debug, f"Saving generated samples to {output_location}") self.dataset.to_csv(output_location) def get_perplexity(self, dataset, model, tokenizer, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model (PreTrainedModel): The model to be evaluated. tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences. **kwargs: Additional keyword arguments. 
""" perplexities = [] sum_nllos = 0 n_tokens = 0 for index, sample in dataset.iterrows(): input_sentence = sample['input'] sentence = sample['generated'] if len(sentence) == 0: continue combined_sentence = input_sentence + sentence encodings = tokenizer(combined_sentence, return_tensors='pt') input_ids = encodings['input_ids'].to(model.device) attention_mask = encodings['attention_mask'].to(model.device) input_encodings = tokenizer(input_sentence, return_tensors='pt') input_ids_inputs = input_encodings['input_ids'] input_length = input_ids_inputs.size(1) with torch.no_grad(): output = model(input_ids, labels=input_ids, attention_mask=attention_mask) logprobs = output.logits[0, :].log_softmax(dim=-1) loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum') loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous()) loss = loss.to(torch.float32).detach().cpu().numpy() n_tokens_here = input_ids.shape[-1] - input_length - 1 if n_tokens_here > 0: perplexity = np.exp(loss / n_tokens_here) sum_nllos += loss n_tokens += n_tokens_here if not np.isnan(perplexity): perplexities.append(perplexity) average = np.mean(perplexities) median = np.median(perplexities) real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"] tokenizer = load_tokenizer(model_name_fluency)
load_dotenv() class Evaluation(BaseClass): """ This class is used for evaluating a model's performance on a given dataset. It includes methods for preparing the dataset, evaluating the model, generating samples, calculating perplexity and faithfulness of the model. """ def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5, bleurt_checkpoint="../models/BLEURT-20", **kwargs): """ Initialize the Evaluation class with the given parameters. Args: generator (ModelArithmetic, optional): The model to be evaluated. dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. Dataset should contain column "text", "input", "output and "label" ("label", "input", "output" optional) train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned. train_dataset_location (string, optional): The location of the dataset to be used for training the model. n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input". bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint. **kwargs: Additional keyword arguments. """ self.has_input_task = True self.dataset = None if dataset is not None: self.dataset = dataset.copy() elif dataset_location is not None: self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n") if train_dataset is not None: self.train_dataset = train_dataset elif train_dataset_location is not None: self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n") else: self.train_dataset = None if self.dataset is not None: self.prepare_dataset(n_input_words) super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task, output=dict(), extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None) if isinstance(generator, ModelArithmetic): # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable del self.kwargs["generator"] self.kwargs["formula"] = generator.formula self.formula = generator.formula def prepare_dataset(self, n_input_words=5): """ Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output. If the dataset does not have a label column, it assumes all labels are 1. Args: n_input_words (int): The number of input words to be used. 
""" log(logger.debug, "Preparing dataset") if "input" not in self.dataset.columns: log(logger.debug, f"No input column found, assuming input is the first {n_input_words} words of the output") self.dataset["input"] = self.dataset["text"].apply(lambda x: " ".join(x.split()[:n_input_words])) self.dataset["output"] = self.dataset["text"].apply(lambda x: " " + " ".join(x.split()[n_input_words:])) self.has_input_task = False if "label" not in self.dataset.columns: log(logger.debug, "No label column found, assuming all labels are 1") self.dataset["label"] = 1 def evaluate_lm_eval(self, model, task_name, batch_size, num_fewshot, model_args, no_cache=False, limit=None, write_out=False, output_folder=None, **kwargs): """ Evaluates the model using the lm_eval package. Args: model (PreTrainedModel): The model to be evaluated. task_name (string): The name of the task for evaluation. batch_size (int): The batch size to be used for evaluation. num_fewshot (int): The number of fewshot examples to be used for evaluation. model_args (dict): The arguments to be passed to the model. no_cache (bool, optional): Whether to use cached results or not. limit (int, optional): The maximum number of examples to be used for evaluation. write_out (bool, optional): Whether to write out the results or not. output_folder (string, optional): The folder to write out the results. **kwargs: Additional keyword arguments. """ try: except ImportError: raise ImportError("Please install lm_eval to run this function") results = evaluator.simple_evaluate( model=model, model_args=model_args, tasks=[task_name], num_fewshot=num_fewshot, batch_size=batch_size, device="cuda" if torch.cuda.is_available() else "cpu", no_cache=no_cache, limit=limit, write_out=write_out, output_base_path=output_folder ) if "lm_eval" in self.output: self.output["lm_eval"][task_name] = results else: self.output["lm_eval"] = {task_name: results} def evaluate(self, max_tokens=128, store_file=None, reload=True, dataset_file=None, reload_data=True, preserve_memory=False, batch_size=1, do_perspective=True, speculation=False, only_faithfulness=False, **kwargs): """ Evaluates the model on the dataset and calculates the perplexity and faithfulness Args: max_tokens (int, optional): The maximum number of tokens to be used for evaluation. store_file (string, optional): The file to store the evaluation results. reload (bool, optional): Whether to reload the dataset or not if it was stored before. dataset_file (string, optional): The file containing the dataset. If path exists, dataset is loaded from path. If path does not exist, dataset is saved to path. reload_data (bool, optional): Whether to reload the data or not if it was stored before. preserve_memory (bool, optional): Whether to preserve memory or not. batch_size (int, optional): The batch size to be used for evaluation. do_perspective (bool, optional): Whether to calculate the perspective score or not. speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. 
""" if store_file is not None: os.makedirs(os.path.dirname(store_file), exist_ok=True) if dataset_file is not None: os.makedirs(os.path.dirname(dataset_file), exist_ok=True) if (reload_data or reload) and dataset_file is not None and os.path.isfile(dataset_file): log(logger.debug, f"Reloading dataset from {dataset_file}") self.dataset = pd.read_csv(dataset_file, escapechar='\\', lineterminator="\n") self.dataset.fillna("", inplace=True) else: log(logger.debug,"Generating samples") self.generate_samples(max_tokens, batch_size=batch_size, speculation=speculation, **kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") if reload and store_file is not None and os.path.isfile(store_file): self.output = json.load(open(store_file))["output"] log(logger.info, f"Reloading output. Output is {self.output}") if not reload: self.output = dict() self.dataset.dropna(inplace=True) if preserve_memory and hasattr(self.generator, "clear_memory"): # can have weird side effects! self.generator.clear_memory() if not only_faithfulness: self.perplexity(self.dataset, **kwargs) else: del self.output['faithfulness'] self.faithfulness_multiple(self.dataset, **kwargs) if do_perspective and not only_faithfulness: self.perspective(**kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") self.extra_kwargs = {kwarg: str(kwargs[kwarg]) for kwarg in kwargs} self.extra_kwargs["max_tokens"] = max_tokens if store_file is not None: log(logger.info, f"Saving output to {store_file}") self.save(store_file) return self.output def perspective(self, classification_with_input=True, **kwargs): """ Calculates the perspective score of the generated sentences. Args: classification_with_input (bool, optional): Whether to use the input of the sentence for calculating the perspective score or not. **kwargs: Additional keyword arguments. """ retries = 0 if "perspective" in self.output: log(logger.info, f"Reloading perspective. 
Perspective is {self.output['perspective']}") return self.output["perspective"] log(logger.info, "Calculating perspective") service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=os.environ["PERSPECTIVE_API_KEY"], discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1", static_discovery=False) scores = [] self.dataset["perspective"] = [None] * len(self.dataset) for index, sample in self.dataset.iterrows(): sentence = sample['generated'] if classification_with_input: sentence = sample['input'] + sentence analyze_request = { 'comment': { 'text': sentence }, 'languages': ["en"], 'requestedAttributes': {'TOXICITY': {}} } while retries < 10 ** 5: try: response = service.comments().analyze(body=analyze_request).execute() break except Exception as e: if "LANGUAGE_NOT_SUPPORTED_BY_ATTRIBUTE" in str(e): continue log(logger.warning, f"Exception {e} occurred, retrying...") retries += 1 time.sleep(10) if retries == 10 ** 5: log(logger.error, "Could not get all perspective scores") break value = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"] self.dataset.at[index, "perspective"] = value scores.append(value) time.sleep(0.1) result = np.mean(scores) log(logger.info, f"Perspective is {result:.6f}") self.output["perspective"] = { "mean": result, "std": np.std(scores), } return result def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs): """ Generates samples from the model. Args: max_tokens (int): The maximum number of tokens to be used for evaluation. batch_size (int, optional): The batch size to be used for evaluation. temperature (float, optional): The temperature to be used for sampling. top_p (float, optional): The top-p value to be used for sampling. top_k (int, optional): The top-k value to be used for sampling. stop_texts (list, optional): The list of texts at which sampling should be stopped speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. """ start_time = time.time() if "generated" not in self.dataset.columns: texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts, do_speculation=speculation) self.dataset["generated"] = texts end_time = time.time() self.output["time"] = { "total_time": end_time - start_time, "time_per_sample": (end_time - start_time) / len(self.dataset), "dataset_size": len(self.dataset), "max_tokens": max_tokens, "batch_size": batch_size } def save_generated(self, output_location): """ Saves the generated samples to the specified location. Args: output_location (string): The location to save the generated samples. """ log(logger.debug, f"Saving generated samples to {output_location}") self.dataset.to_csv(output_location) def get_perplexity(self, dataset, model, tokenizer, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model (PreTrainedModel): The model to be evaluated. tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences. **kwargs: Additional keyword arguments. 
""" perplexities = [] sum_nllos = 0 n_tokens = 0 for index, sample in dataset.iterrows(): input_sentence = sample['input'] sentence = sample['generated'] if len(sentence) == 0: continue combined_sentence = input_sentence + sentence encodings = tokenizer(combined_sentence, return_tensors='pt') input_ids = encodings['input_ids'].to(model.device) attention_mask = encodings['attention_mask'].to(model.device) input_encodings = tokenizer(input_sentence, return_tensors='pt') input_ids_inputs = input_encodings['input_ids'] input_length = input_ids_inputs.size(1) with torch.no_grad(): output = model(input_ids, labels=input_ids, attention_mask=attention_mask) logprobs = output.logits[0, :].log_softmax(dim=-1) loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum') loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous()) loss = loss.to(torch.float32).detach().cpu().numpy() n_tokens_here = input_ids.shape[-1] - input_length - 1 if n_tokens_here > 0: perplexity = np.exp(loss / n_tokens_here) sum_nllos += loss n_tokens += n_tokens_here if not np.isnan(perplexity): perplexities.append(perplexity) average = np.mean(perplexities) median = np.median(perplexities) real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"] tokenizer = load_tokenizer(model_name_fluency)
model = load_model(model_name_fluency, dtype=dtype)
2
2023-11-21 20:01:08+00:00
24k
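The statistics collected by the monitor in the entry above (entropy, KL divergence, and the expected acceptance probability of speculative sampling) reduce to three short formulas. A minimal sketch with epsilon smoothing standing in for the epsilon used in the snippets above; the toy distributions are invented.

import torch

EPS = 1e-12  # small smoothing constant, standing in for self.epsilon above

def kl_divergence(p, q):
    # KL(p || q) with epsilon smoothing, as in the snippet above.
    return torch.sum(p * torch.log((p + EPS) / (q + EPS)))

def entropy(p):
    return -torch.sum(p * torch.log(p + EPS))

def expected_acceptance_prob(p, q):
    # One minus the total-variation distance between draft (p) and target (q).
    return 1.0 - 0.5 * torch.sum(torch.abs(q - p)).item()

p = torch.tensor([0.4, 0.3, 0.2, 0.1])
q = torch.tensor([0.1, 0.2, 0.3, 0.4])
print(expected_acceptance_prob(p, q))  # 1 - 0.5 * 0.8 = 0.6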
HeliosZhao/Animate124
dnerf/renderer.py
[ { "identifier": "custom_meshgrid", "path": "nerf/utils.py", "snippet": "def custom_meshgrid(*args):\n # ref: https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid\n if pver.parse(torch.__version__) < pver.parse('1.10'):\n return torch.meshgrid(*args)\n else:\n return torch.meshgrid(*args, indexing='ij')" }, { "identifier": "safe_normalize", "path": "nerf/utils.py", "snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n def __init__(self, opt):\n super().__init__()\n\n self.opt = opt\n self.bound = opt.bound\n self.cascade = 1 + math.ceil(math.log2(opt.bound))\n self.grid_size = 128\n self.max_level = None\n self.dmtet = opt.dmtet\n self.cuda_ray = opt.cuda_ray\n self.taichi_ray = opt.taichi_ray\n self.min_near = opt.min_near\n self.density_thresh = opt.density_thresh\n\n # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)\n # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.\n aabb_train = torch.FloatTensor(\n [-opt.bound, -opt.bound, -opt.bound, opt.bound, opt.bound, opt.bound])\n aabb_infer = aabb_train.clone()\n self.register_buffer('aabb_train', aabb_train)\n self.register_buffer('aabb_infer', aabb_infer)\n\n self.glctx = None\n\n # extra state for cuda raymarching\n if self.cuda_ray:\n ## NOTE TODO the cuda ray sampling for DNeRF is different, make sure to change\n # density grid\n density_grid = torch.zeros(\n [self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(\n self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n\n # load dmtet vertices\n if self.opt.dmtet:\n self.dmtet = DMTetGeometry(opt.tet_grid_size, opt.tet_mlp, opt).to(opt.device)\n if self.opt.h <= 2048 and self.opt.w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n\n if self.taichi_ray:\n from einops import rearrange\n from taichi_modules import RayMarcherTaichi\n from taichi_modules import VolumeRendererTaichi\n from taichi_modules import RayAABBIntersector as RayAABBIntersectorTaichi\n from taichi_modules import raymarching_test as raymarching_test_taichi\n from taichi_modules import composite_test as composite_test_fw\n from taichi_modules import packbits as packbits_taichi\n self.rearrange = rearrange\n self.packbits_taichi = packbits_taichi\n self.ray_aabb_intersector = RayAABBIntersectorTaichi\n self.raymarching_test_taichi = raymarching_test_taichi\n self.composite_test_fw = composite_test_fw\n self.ray_marching = RayMarcherTaichi(\n batch_size=4096) # TODO: hard encoded batch size\n self.volume_render = VolumeRendererTaichi(\n batch_size=4096) # TODO: hard encoded batch size\n # density grid\n density_grid = torch.zeros(\n [self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]\n density_bitfield = torch.zeros(\n self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]\n self.register_buffer('density_grid', density_grid)\n self.register_buffer('density_bitfield', density_bitfield)\n self.mean_density = 0\n self.iter_density = 0\n \n if self.opt.density_activation == 'exp':\n 
self.density_activation = trunc_exp\n elif self.opt.density_activation == 'softplus':\n self.density_activation = F.softplus\n elif self.opt.density_activation == 'relu':\n self.density_activation = F.relu\n \n # ref: https://github.com/zhaofuq/Instant-NSR/blob/main/nerf/network_sdf.py#L192\n def finite_difference_normal(self, x, epsilon=1e-2):\n # x: [N, 3]\n # ipdb.set_trace()\n dx_pos, _ = self.common_forward((x + torch.tensor([[epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dx_neg, _ = self.common_forward((x + torch.tensor([[-epsilon, 0.00, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dy_pos, _ = self.common_forward((x + torch.tensor([[0.00, epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dy_neg, _ = self.common_forward((x + torch.tensor([[0.00, -epsilon, 0.00]], device=x.device)).clamp(-self.bound, self.bound))\n dz_pos, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, epsilon]], device=x.device)).clamp(-self.bound, self.bound))\n dz_neg, _ = self.common_forward((x + torch.tensor([[0.00, 0.00, -epsilon]], device=x.device)).clamp(-self.bound, self.bound))\n \n normal = torch.stack([\n 0.5 * (dx_pos - dx_neg) / epsilon, \n 0.5 * (dy_pos - dy_neg) / epsilon, \n 0.5 * (dz_pos - dz_neg) / epsilon\n ], dim=-1)\n\n return -normal\n \n def normal(self, x):\n normal = self.finite_difference_normal(x)\n normal = safe_normalize(normal)\n normal = torch.nan_to_num(normal)\n return normal\n\n @torch.no_grad()\n def density_blob(self, x):\n # x: [B, N, 3]\n\n d = (x ** 2).sum(-1)\n\n if self.opt.density_activation == 'exp':\n g = self.opt.blob_density * \\\n torch.exp(- d / (2 * self.opt.blob_radius ** 2))\n else:\n g = self.opt.blob_density * \\\n (1 - torch.sqrt(d) / self.opt.blob_radius)\n\n return g\n\n def forward(self, x, d):\n raise NotImplementedError()\n\n def density(self, x):\n raise NotImplementedError()\n\n def reset_extra_state(self):\n if not (self.cuda_ray or self.taichi_ray):\n return\n # density grid\n self.density_grid.zero_()\n self.mean_density = 0\n self.iter_density = 0\n\n @torch.no_grad()\n def export_mesh(self, path, resolution=None, decimate_target=-1, S=128):\n from meshutils import decimate_mesh, clean_mesh, poisson_mesh_reconstruction\n if self.opt.dmtet:\n vertices, triangles = self.dmtet.get_verts_face()\n vertices = vertices.detach().cpu().numpy()\n triangles = triangles.detach().cpu().numpy()\n\n else:\n\n if resolution is None:\n resolution = self.grid_size\n\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh) \\\n if np.greater(self.mean_density, 0) else self.density_thresh\n else:\n density_thresh = self.density_thresh\n\n sigmas = np.zeros(\n [resolution, resolution, resolution], dtype=np.float32)\n\n # query\n X = torch.linspace(-1, 1, resolution).split(S)\n Y = torch.linspace(-1, 1, resolution).split(S)\n Z = torch.linspace(-1, 1, resolution).split(S)\n\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n pts = torch.cat(\n [xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [S, 3]\n val = self.density(pts.to(self.aabb_train.device))\n sigmas[xi * S: xi * S + len(xs), yi * S: yi * S + len(ys), zi * S: zi * S + len(\n zs)] = val['sigma'].reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() # [S, 1] --> [x, y, z]\n\n logger.info(\n f'[INFO] marching cubes thresh: {density_thresh} ({sigmas.min()} ~ {sigmas.max()})')\n\n vertices, triangles = 
mcubes.marching_cubes(sigmas, density_thresh)\n vertices = vertices / (resolution - 1.0) * 2 - 1\n\n # clean\n vertices = vertices.astype(np.float32)\n triangles = triangles.astype(np.int32)\n vertices, triangles = clean_mesh(\n vertices, triangles, remesh=True, remesh_size=0.01)\n\n # decimation\n if decimate_target > 0 and triangles.shape[0] > decimate_target:\n vertices, triangles = decimate_mesh(\n vertices, triangles, decimate_target)\n\n v = torch.from_numpy(vertices).contiguous(\n ).float().to(self.aabb_train.device)\n f = torch.from_numpy(triangles).contiguous().int().to(\n self.aabb_train.device)\n\n # mesh = trimesh.Trimesh(vertices, triangles, process=False) # important, process=True leads to seg fault...\n # mesh.export(os.path.join(path, f'mesh.ply'))\n\n def _export(v, f, h0=2048, w0=2048, ssaa=1, name=''):\n # v, f: torch Tensor\n device = v.device\n v_np = v.cpu().numpy() # [N, 3]\n f_np = f.cpu().numpy() # [M, 3]\n\n logger.info(\n f'[INFO] running xatlas to unwrap UVs for mesh: v={v_np.shape} f={f_np.shape}')\n\n # unwrap uvs\n import xatlas\n import nvdiffrast.torch as dr\n from sklearn.neighbors import NearestNeighbors\n from scipy.ndimage import binary_dilation, binary_erosion\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(v_np, f_np)\n chart_options = xatlas.ChartOptions()\n chart_options.max_iterations = 4 # for faster unwrap...\n atlas.generate(chart_options=chart_options)\n vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2]\n\n # vmapping, ft_np, vt_np = xatlas.parametrize(v_np, f_np) # [N], [M, 3], [N, 2]\n\n vt = torch.from_numpy(vt_np.astype(np.float32)).float().to(device)\n ft = torch.from_numpy(ft_np.astype(np.int64)).int().to(device)\n\n # render uv maps\n uv = vt * 2.0 - 1.0 # uvs to range [-1, 1]\n uv = torch.cat((uv, torch.zeros_like(\n uv[..., :1]), torch.ones_like(uv[..., :1])), dim=-1) # [N, 4]\n\n if ssaa > 1:\n h = int(h0 * ssaa)\n w = int(w0 * ssaa)\n else:\n h, w = h0, w0\n \n if self.glctx is None:\n if h <= 2048 and w <= 2048:\n self.glctx = dr.RasterizeCudaContext()\n else:\n self.glctx = dr.RasterizeGLContext()\n\n rast, _ = dr.rasterize(self.glctx, uv.unsqueeze(\n 0), ft, (h, w)) # [1, h, w, 4]\n xyzs, _ = dr.interpolate(v.unsqueeze(0), rast, f) # [1, h, w, 3]\n mask, _ = dr.interpolate(torch.ones_like(\n v[:, :1]).unsqueeze(0), rast, f) # [1, h, w, 1]\n\n # masked query \n xyzs = xyzs.view(-1, 3)\n mask = (mask > 0).view(-1)\n \n feats = torch.zeros(h * w, 3, device=device, dtype=torch.float32)\n\n if mask.any():\n xyzs = xyzs[mask] # [M, 3]\n\n # batched inference to avoid OOM\n all_feats = []\n head = 0\n while head < xyzs.shape[0]:\n tail = min(head + 640000, xyzs.shape[0])\n results_ = self.density(xyzs[head:tail])\n all_feats.append(results_['albedo'].float())\n head += 640000\n\n feats[mask] = torch.cat(all_feats, dim=0)\n \n feats = feats.view(h, w, -1)\n mask = mask.view(h, w)\n\n # quantize [0.0, 1.0] to [0, 255]\n feats = feats.cpu().numpy()\n feats = (feats * 255).astype(np.uint8)\n\n ### NN search as an antialiasing ...\n mask = mask.cpu().numpy()\n\n inpaint_region = binary_dilation(mask, iterations=3)\n inpaint_region[mask] = 0\n\n search_region = mask.copy()\n not_search_region = binary_erosion(search_region, iterations=2)\n search_region[not_search_region] = 0\n\n search_coords = np.stack(np.nonzero(search_region), axis=-1)\n inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)\n\n knn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(search_coords)\n _, indices = knn.kneighbors(inpaint_coords)\n\n 
feats[tuple(inpaint_coords.T)] = feats[tuple(search_coords[indices[:, 0]].T)]\n\n feats = cv2.cvtColor(feats, cv2.COLOR_RGB2BGR)\n\n # do ssaa after the NN search, in numpy\n if ssaa > 1:\n feats = cv2.resize(feats, (w0, h0), interpolation=cv2.INTER_LINEAR)\n\n cv2.imwrite(os.path.join(path, f'{name}albedo.png'), feats)\n\n # save obj (v, vt, f /)\n obj_file = os.path.join(path, f'{name}mesh.obj')\n mtl_file = os.path.join(path, f'{name}mesh.mtl')\n\n logger.info(f'[INFO] writing obj mesh to {obj_file}')\n with open(obj_file, \"w\") as fp:\n fp.write(f'mtllib {name}mesh.mtl \\n')\n\n logger.info(f'[INFO] writing vertices {v_np.shape}')\n for v in v_np:\n fp.write(f'v {v[0]} {v[1]} {v[2]} \\n')\n\n logger.info(\n f'[INFO] writing vertices texture coords {vt_np.shape}')\n for v in vt_np:\n fp.write(f'vt {v[0]} {1 - v[1]} \\n')\n\n logger.info(f'[INFO] writing faces {f_np.shape}')\n fp.write(f'usemtl mat0 \\n')\n for i in range(len(f_np)):\n fp.write(\n f\"f {f_np[i, 0] + 1}/{ft_np[i, 0] + 1} {f_np[i, 1] + 1}/{ft_np[i, 1] + 1} {f_np[i, 2] + 1}/{ft_np[i, 2] + 1} \\n\")\n\n with open(mtl_file, \"w\") as fp:\n fp.write(f'newmtl mat0 \\n')\n fp.write(f'Ka 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Kd 1.000000 1.000000 1.000000 \\n')\n fp.write(f'Ks 0.000000 0.000000 0.000000 \\n')\n fp.write(f'Tr 1.000000 \\n')\n fp.write(f'illum 1 \\n')\n fp.write(f'Ns 0.000000 \\n')\n fp.write(f'map_Kd {name}albedo.png \\n')\n\n _export(v, f)\n\n def run(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # bg_color: [BN, 3] in range [0, 1]\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n results = {}\n\n # choose aabb\n aabb = self.aabb_train if self.training else self.aabb_infer\n\n # sample steps\n # nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, aabb, self.min_near)\n # nears.unsqueeze_(-1)\n # fars.unsqueeze_(-1)\n nears, fars = near_far_from_bound(rays_o, rays_d, self.bound, type='sphere', min_near=self.min_near)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n if self.training:\n light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [N, 3]\n else:\n light_d = safe_normalize(rays_o[0:1] + torch.randn(3, device=rays_o.device)) # [N, 3]\n \n #print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}')\n\n z_vals = torch.linspace(0.0, 1.0, self.opt.num_steps, device=device).unsqueeze(0) # [1, T]\n z_vals = z_vals.expand((N, self.opt.num_steps)) # [N, T]\n z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]\n\n # perturb z_vals\n sample_dist = (fars - nears) / self.opt.num_steps\n if perturb:\n z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist\n #z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs.\n\n # generate xyzs\n xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]\n xyzs = torch.min(torch.max(xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n #plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n\n # query SDF and RGB\n density_outputs = self.density(xyzs.reshape(-1, 3))\n\n #sigmas = 
density_outputs['sigma'].view(N, self.opt.num_steps) # [N, T]\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(N, self.opt.num_steps, -1)\n\n # upsample z_vals (nerf-like)\n if self.opt.upsample_steps > 0:\n with torch.no_grad():\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1) ## confused with this, so the last point should be around relative distance or zero?\n\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T]\n\n # sample new z_vals\n z_vals_mid = (z_vals[..., :-1] + 0.5 * deltas[..., :-1]) # [N, T-1]\n new_z_vals = sample_pdf(z_vals_mid, weights[:, 1:-1], self.opt.upsample_steps, det=not self.training).detach() # [N, t]\n\n new_xyzs = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * new_z_vals.unsqueeze(-1) # [N, 1, 3] * [N, t, 1] -> [N, t, 3]\n new_xyzs = torch.min(torch.max(new_xyzs, aabb[:3]), aabb[3:]) # a manual clip.\n\n # only forward new points to save computation\n new_density_outputs = self.density(new_xyzs.reshape(-1, 3))\n #new_sigmas = new_density_outputs['sigma'].view(N, self.opt.upsample_steps) # [N, t]\n for k, v in new_density_outputs.items():\n new_density_outputs[k] = v.view(N, self.opt.upsample_steps, -1)\n\n # re-order\n z_vals = torch.cat([z_vals, new_z_vals], dim=1) # [N, T+t]\n z_vals, z_index = torch.sort(z_vals, dim=1)\n\n xyzs = torch.cat([xyzs, new_xyzs], dim=1) # [N, T+t, 3]\n xyzs = torch.gather(xyzs, dim=1, index=z_index.unsqueeze(-1).expand_as(xyzs))\n\n for k in density_outputs:\n tmp_output = torch.cat([density_outputs[k], new_density_outputs[k]], dim=1)\n density_outputs[k] = torch.gather(tmp_output, dim=1, index=z_index.unsqueeze(-1).expand_as(tmp_output))\n\n deltas = z_vals[..., 1:] - z_vals[..., :-1] # [N, T+t-1]\n deltas = torch.cat([deltas, sample_dist * torch.ones_like(deltas[..., :1])], dim=-1)\n alphas = 1 - torch.exp(-deltas * density_outputs['sigma'].squeeze(-1)) # [N, T+t]\n alphas_shifted = torch.cat([torch.ones_like(alphas[..., :1]), 1 - alphas + 1e-15], dim=-1) # [N, T+t+1]\n weights = alphas * torch.cumprod(alphas_shifted, dim=-1)[..., :-1] # [N, T+t]\n\n dirs = rays_d.view(-1, 1, 3).expand_as(xyzs)\n light_d = light_d.view(-1, 1, 3).expand_as(xyzs)\n for k, v in density_outputs.items():\n density_outputs[k] = v.view(-1, v.shape[-1])\n\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs.reshape(-1, 3), dirs.reshape(-1, 3), light_d.reshape(-1,3), ratio=ambient_ratio, shading=shading)\n rgbs = rgbs.view(N, -1, 3) # [N, T+t, 3]\n if normals is not None:\n normals = normals.view(N, -1, 3)\n\n # calculate weight_sum (mask)\n weights_sum = weights.sum(dim=-1) # [N]\n \n # calculate depth \n depth = torch.sum(weights * z_vals, dim=-1)\n\n # calculate color\n image = torch.sum(weights.unsqueeze(-1) * rgbs, dim=-2) # [N, 3], in [0, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n # ipdb.set_trace()\n image = image.view(*prefix, 3)\n depth = depth.view(*prefix)\n weights_sum = weights_sum.reshape(*prefix)\n\n if self.training:\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation 
loss\n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.sum(-1).mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if normals is not None:\n normal_image = torch.sum(\n weights.unsqueeze(-1) * (normals + 1) / 2, dim=-2) # [N, 3], in [0, 1]\n results['normal_image'] = normal_image\n \n results['image'] = image\n results['depth'] = depth\n results['weights'] = weights\n results['weights_sum'] = weights_sum\n\n return results\n\n\n def run_cuda(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, binarize=False, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: image: [B, N, 3], depth: [B, N]\n # ipdb.set_trace()\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n nears, fars = raymarching.near_far_from_aabb(rays_o, rays_d, self.aabb_train if self.training else self.aabb_infer)\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n if self.training:\n light_d = safe_normalize(rays_o[0:1] + torch.randn(3, device=rays_o.device)) # [N, 3]\n else:\n light_d = safe_normalize(rays_o[0:1] + torch.randn(3, device=rays_o.device)) # [N, 3]\n\n results = {}\n\n if self.training:\n xyzs, dirs, ts, rays = raymarching.march_rays_train(rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n\n if light_d.shape[0] > 1:\n flatten_rays = raymarching.flatten_rays(rays, xyzs.shape[0]).long()\n light_d = light_d[flatten_rays]\n \n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n weights, weights_sum, depth, image = raymarching.composite_rays_train(sigmas, rgbs, ts, rays, T_thresh, binarize)\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if normals is not None:\n _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize)\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = nears.clone() # [N]\n\n step = 0\n \n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n n_step = max(min(N // n_alive, 8), 1)\n\n xyzs, dirs, 
ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, rays_o, rays_d, self.bound, self.density_bitfield, self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps)\n dirs = safe_normalize(dirs)\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize)\n\n rays_alive = rays_alive[rays_alive >= 0]\n #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}')\n\n step += n_step\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n\n image = image + (1 - weights_sum).unsqueeze(-1) * bg_color\n image = image.view(*prefix, 3)\n\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n \n return results\n\n def get_sdf_albedo_for_init(self, points=None):\n output = self.density(self.dmtet.verts if points is None else points)\n sigma, albedo = output['sigma'], output['albedo']\n return sigma - self.density_thresh, albedo\n\n def run_dmtet(self, rays_o, rays_d, mvp, h, w, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, **kwargs):\n # mvp: [B, 4, 4]\n\n device = mvp.device\n campos = rays_o[:, 0, :] # only need one ray per batch\n\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = safe_normalize(campos + torch.randn_like(campos)).view(-1, 1, 1, 3) # [B, 1, 1, 3]\n\n results = {}\n\n verts, faces = self.dmtet.get_verts_face()\n\n # get normals\n i0, i1, i2 = faces[:, 0], faces[:, 1], faces[:, 2]\n v0, v1, v2 = verts[i0, :], verts[i1, :], verts[i2, :]\n\n faces = faces.int()\n \n face_normals = torch.cross(v1 - v0, v2 - v0)\n face_normals = safe_normalize(face_normals)\n \n vn = torch.zeros_like(verts)\n vn.scatter_add_(0, i0[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i1[:, None].repeat(1,3), face_normals)\n vn.scatter_add_(0, i2[:, None].repeat(1,3), face_normals)\n\n vn = torch.where(torch.sum(vn * vn, -1, keepdim=True) > 1e-20, vn, torch.tensor([0.0, 0.0, 1.0], dtype=torch.float32, device=vn.device))\n\n # rasterization\n verts_clip = torch.bmm(F.pad(verts, pad=(0, 1), mode='constant', value=1.0).unsqueeze(0).repeat(mvp.shape[0], 1, 1), \n mvp.permute(0,2,1)).float() # [B, N, 4]\n rast, rast_db = dr.rasterize(self.glctx, verts_clip, faces, (h, w))\n \n alpha, _ = dr.interpolate(torch.ones_like(verts[:, :1]).unsqueeze(0), rast, faces) # [B, H, W, 1]\n xyzs, _ = dr.interpolate(verts.unsqueeze(0), rast, faces) # [B, H, W, 3]\n normal, _ = dr.interpolate(vn.unsqueeze(0).contiguous(), rast, faces)\n normal = safe_normalize(normal)\n\n xyzs = xyzs.view(-1, 3)\n mask = (alpha > 0).view(-1).detach()\n\n # do the lighting here since we have normal from mesh now.\n albedo = torch.zeros_like(xyzs, dtype=torch.float32)\n if mask.any():\n masked_albedo = self.density(xyzs[mask])['albedo']\n albedo[mask] = masked_albedo.float()\n albedo = albedo.view(-1, h, w, 3)\n\n if shading == 'albedo':\n color = albedo\n elif shading == 'textureless':\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = 
lambertian.unsqueeze(-1).repeat(1, 1, 1, 3)\n elif shading == 'normal':\n color = (normal + 1) / 2\n else: # 'lambertian'\n lambertian = ambient_ratio + (1 - ambient_ratio) * (normal * light_d).sum(-1).float().clamp(min=0)\n color = albedo * lambertian.unsqueeze(-1)\n\n color = dr.antialias(color, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n alpha = dr.antialias(alpha, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 1]\n\n # mix background color\n if bg_color is None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n \n if torch.is_tensor(bg_color) and len(bg_color.shape) > 1:\n bg_color = bg_color.view(-1, h, w, 3)\n \n depth = rast[:, :, :, [2]] # [B, H, W]\n color = color + (1 - alpha) * bg_color\n\n results['depth'] = depth \n results['image'] = color\n results['weights_sum'] = alpha.squeeze(-1)\n\n normal_image = dr.antialias((normal + 1) / 2, rast, verts_clip, faces).clamp(0, 1) # [B, H, W, 3]\n results['normal_image'] = normal_image\n \n # regularizations\n if self.training:\n if self.opt.lambda_mesh_normal > 0:\n results['loss_normal'] = normal_consistency(\n face_normals, faces)\n if self.opt.lambda_mesh_lap > 0:\n results['loss_lap'] = laplacian_smooth_loss(verts, faces)\n\n return results\n\n def run_taichi(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, **kwargs):\n # rays_o, rays_d: [B, N, 3], assumes B == 1\n # return: image: [B, N, 3], depth: [B, N]\n\n prefix = rays_o.shape[:-1]\n rays_o = rays_o.contiguous().view(-1, 3)\n rays_d = rays_d.contiguous().view(-1, 3)\n\n N = rays_o.shape[0] # N = B * N, in fact\n device = rays_o.device\n\n # pre-calculate near far\n exp_step_factor = kwargs.get('exp_step_factor', 0.)\n MAX_SAMPLES = 1024\n NEAR_DISTANCE = 0.01\n center = torch.zeros(1, 3)\n half_size = torch.ones(1, 3)\n _, hits_t, _ = self.ray_aabb_intersector.apply(rays_o, rays_d, center, half_size, 1)\n hits_t[(hits_t[:, 0, 0] >= 0) & (hits_t[:, 0, 0] < NEAR_DISTANCE), 0, 0] = NEAR_DISTANCE\n\n # TODO: should sample different light_d for each batch... 
but taichi end doesn't have a flatten_ray implemented currently...\n # random sample light_d if not provided\n if light_d is None:\n # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face)\n light_d = (rays_o[0] + torch.randn(3, device=device, dtype=torch.float))\n light_d = safe_normalize(light_d)\n\n results = {}\n\n if self.training:\n rays_a, xyzs, dirs, deltas, ts, _ = self.ray_marching(rays_o, rays_d, hits_t[:, 0], self.density_bitfield, self.cascade, self.bound, exp_step_factor, self.grid_size, MAX_SAMPLES)\n dirs = safe_normalize(dirs)\n # plot_pointcloud(xyzs.reshape(-1, 3).detach().cpu().numpy())\n sigmas, rgbs, normals = self(xyzs, dirs, light_d, ratio=ambient_ratio, shading=shading)\n _, weights_sum, depth, image, weights = self.volume_render(sigmas, rgbs, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n \n # normals related regularizations\n if self.opt.lambda_orient > 0 and normals is not None:\n # orientation loss \n loss_orient = weights.detach() * (normals * dirs).sum(-1).clamp(min=0) ** 2\n results['loss_orient'] = loss_orient.mean()\n \n if self.opt.lambda_3d_normal_smooth > 0 and normals is not None:\n normals_perturb = self.normal(xyzs + torch.randn_like(xyzs) * 1e-2)\n results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean()\n \n if normals is not None:\n _, _, _, normal_image, _ = self.volume_render(sigmas.detach(), (normals + 1) / 2, deltas, ts, rays_a, kwargs.get('T_threshold', 1e-4))\n results['normal_image'] = normal_image\n \n # weights normalization\n results['weights'] = weights\n\n else:\n \n # allocate outputs \n dtype = torch.float32\n \n weights_sum = torch.zeros(N, dtype=dtype, device=device)\n depth = torch.zeros(N, dtype=dtype, device=device)\n image = torch.zeros(N, 3, dtype=dtype, device=device)\n \n n_alive = N\n rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N]\n rays_t = hits_t[:, 0, 0]\n step = 0\n \n min_samples = 1 if exp_step_factor == 0 else 4\n\n while step < self.opt.max_steps: # hard coded max step\n\n # count alive rays \n n_alive = rays_alive.shape[0]\n\n # exit loop\n if n_alive <= 0:\n break\n\n # decide compact_steps\n # n_step = max(min(N // n_alive, 8), 1)\n n_step = max(min(N // n_alive, 64), min_samples)\n\n xyzs, dirs, deltas, ts, N_eff_samples = \\\n self.raymarching_test_taichi(rays_o, rays_d, hits_t[:, 0], rays_alive,\n self.density_bitfield, self.cascade,\n self.bound, exp_step_factor,\n self.grid_size, MAX_SAMPLES, n_step)\n\n xyzs = self.rearrange(xyzs, 'n1 n2 c -> (n1 n2) c')\n dirs = self.rearrange(dirs, 'n1 n2 c -> (n1 n2) c')\n dirs = safe_normalize(dirs)\n valid_mask = ~torch.all(dirs == 0, dim=1)\n if valid_mask.sum() == 0:\n break\n\n sigmas = torch.zeros(len(xyzs), device=device)\n rgbs = torch.zeros(len(xyzs), 3, device=device)\n normals = torch.zeros(len(xyzs), 3, device=device)\n\n sigmas[valid_mask], _rgbs, normals = self(xyzs[valid_mask], dirs[valid_mask], light_d, ratio=ambient_ratio, shading=shading)\n rgbs[valid_mask] = _rgbs.float()\n sigmas = self.rearrange(sigmas, '(n1 n2) -> n1 n2', n2=n_step)\n rgbs = self.rearrange(rgbs, '(n1 n2) c -> n1 n2 c', n2=n_step)\n if normals is not None:\n normals = self.rearrange(normals, '(n1 n2) c -> n1 n2 c', n2=n_step)\n\n self.composite_test_fw(sigmas, rgbs, deltas, ts, hits_t[:,0], rays_alive,\n kwargs.get('T_threshold', 1e-4), N_eff_samples,\n weights_sum, depth, image)\n\n rays_alive = rays_alive[rays_alive >= 0]\n\n step += n_step\n\n # mix background color\n if bg_color is 
None:\n if self.opt.bg_radius > 0:\n # use the bg model to calculate bg_color\n bg_color = self.background(rays_d) # [N, 3]\n else:\n bg_color = 1\n\n image = image + self.rearrange(1 - weights_sum, 'n -> n 1') * bg_color\n image = image.view(*prefix, 3)\n\n depth = depth.view(*prefix)\n\n weights_sum = weights_sum.reshape(*prefix)\n\n results['image'] = image\n results['depth'] = depth\n results['weights_sum'] = weights_sum\n \n return results\n\n\n @torch.no_grad()\n def update_extra_state(self, decay=0.95, S=128):\n # call before each epoch to update extra states.\n\n if not (self.cuda_ray or self.taichi_ray):\n return \n \n ### update density grid\n tmp_grid = - torch.ones_like(self.density_grid)\n \n X = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.aabb_train.device).split(S)\n\n for xs in X:\n for ys in Y:\n for zs in Z:\n \n # construct points\n xx, yy, zz = custom_meshgrid(xs, ys, zs)\n coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) # [N, 3], in [0, 128)\n indices = raymarching.morton3D(coords).long() # [N]\n xyzs = 2 * coords.float() / (self.grid_size - 1) - 1 # [N, 3] in [-1, 1]\n\n # cascading\n for cas in range(self.cascade):\n bound = min(2 ** cas, self.bound)\n half_grid_size = bound / self.grid_size\n # scale to current cascade's resolution\n cas_xyzs = xyzs * (bound - half_grid_size)\n # add noise in [-hgs, hgs]\n cas_xyzs += (torch.rand_like(cas_xyzs) * 2 - 1) * half_grid_size\n # query density\n sigmas = self.density(cas_xyzs)['sigma'].reshape(-1).detach()\n # assign \n tmp_grid[cas, indices] = sigmas\n # ema update\n valid_mask = self.density_grid >= 0\n self.density_grid[valid_mask] = torch.maximum(self.density_grid[valid_mask] * decay, tmp_grid[valid_mask])\n self.mean_density = torch.mean(self.density_grid[valid_mask]).item()\n self.iter_density += 1\n\n # convert to bitfield\n density_thresh = min(self.mean_density, self.density_thresh)\n if self.cuda_ray:\n self.density_bitfield = raymarching.packbits(self.density_grid, density_thresh, self.density_bitfield)\n elif self.taichi_ray:\n self.packbits_taichi(self.density_grid.reshape(-1).contiguous(), density_thresh, self.density_bitfield)\n\n # print(f'[density grid] min={self.density_grid.min().item():.4f}, max={self.density_grid.max().item():.4f}, mean={self.mean_density:.4f}, occ_rate={(self.density_grid > density_thresh).sum() / (128**3 * self.cascade):.3f}')\n\n\n def render(self, rays_o, rays_d, mvp, h, w, staged=False, max_ray_batch=4096, **kwargs):\n # rays_o, rays_d: [B, N, 3]\n # return: pred_rgb: [B, N, 3]\n B, N = rays_o.shape[:2]\n device = rays_o.device\n\n if self.dmtet:\n results = self.run_dmtet(rays_o, rays_d, mvp, h, w, **kwargs)\n elif self.cuda_ray:\n results = self.run_cuda(rays_o, rays_d, **kwargs)\n elif self.taichi_ray:\n results = self.run_taichi(rays_o, rays_d, **kwargs)\n else:\n if staged:\n depth = torch.empty((B, N), device=device)\n image = torch.empty((B, N, 3), device=device)\n weights_sum = torch.empty((B, N), device=device)\n\n for b in range(B):\n head = 0\n while head < N:\n tail = min(head + max_ray_batch, N)\n results_ = self.run(rays_o[b:b+1, head:tail], rays_d[b:b+1, head:tail], **kwargs)\n depth[b:b+1, head:tail] = results_['depth']\n weights_sum[b:b+1, head:tail] = results_['weights_sum']\n image[b:b+1, head:tail] = results_['image']\n 
head += max_ray_batch\n \n results = {}\n results['depth'] = depth\n results['image'] = image\n results['weights_sum'] = weights_sum\n\n else:\n results = self.run(rays_o, rays_d, **kwargs)\n\n return results\n\n def init_tet_from_nerf(self, reset_scale=True):\n sdf = self.get_sdf_from_nerf(reset_scale=reset_scale)\n self.dmtet.init_tet_from_sdf(sdf)\n logger.info(f'init dmtet from NeRF Done ...')\n\n\n @torch.no_grad()\n def get_sdf_from_nerf(self, reset_scale=True):\n if self.cuda_ray:\n density_thresh = min(self.mean_density, self.density_thresh)\n else:\n density_thresh = self.density_thresh\n\n if reset_scale:\n # init scale\n sigma = self.density(self.dmtet.verts)[\n 'sigma'] # verts covers [-1, 1] now\n mask = sigma > density_thresh\n valid_verts = self.dmtet.verts[mask]\n tet_scale = valid_verts.abs().amax(dim=0) + 1e-1\n self.dmtet.reset_tet_scale(tet_scale)\n sdf = (self.density(self.dmtet.verts)[\n 'sigma'] - density_thresh).clamp(-1, 1)\n return sdf" } ]
import os
import math
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import raymarching
import logging
from tqdm import tqdm
from nerf.utils import custom_meshgrid, safe_normalize
from nerf.renderer import NeRFRenderer
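The import statement above references two helpers from nerf.utils that the renderer code in this record calls repeatedly (safe_normalize for ray/light directions, custom_meshgrid for building density-grid coordinates) but whose definitions are not included here. Below is a minimal sketch of plausible stand-ins; the behaviour follows common stable-dreamfusion-style utilities and is an assumption, not the repository's actual code.

# Hedged sketch: plausible stand-ins for the nerf.utils helpers imported above.
# These are assumptions, not the repository's actual definitions.
import torch


def safe_normalize(x: torch.Tensor, eps: float = 1e-20) -> torch.Tensor:
    # Normalize along the last dim while avoiding division by zero.
    return x / torch.sqrt(torch.clamp(torch.sum(x * x, dim=-1, keepdim=True), min=eps))


def custom_meshgrid(*args: torch.Tensor):
    # torch.meshgrid changed its default behaviour around torch 1.10;
    # pin indexing='ij' so grid coordinates keep (x, y, z) ordering.
    return torch.meshgrid(*args, indexing="ij")


if __name__ == "__main__":
    dirs = torch.randn(4, 3)
    print(safe_normalize(dirs).norm(dim=-1))  # each row has norm ~1.0
    xs = torch.arange(2)
    xx, yy, zz = custom_meshgrid(xs, xs, xs)
    print(xx.shape)  # torch.Size([2, 2, 2])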
15,610
if normals is not None: # _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize) # results['normal_image'] = normal_image.view(*prefix, 3) normal_image = torch.stack(normal_image, dim=0) # F,N,3 results['normal_image'] = normal_image.view(batch_size, num_frames, -1, 3) # B,F,N,3 # weights normalization results['weights'] = weights # N'*F else: image_all = [] weights_sum_all = [] depth_all = [] # ipdb.set_trace() for frame_idx in range(num_frames): _rays_o, _rays_d, _light_d, t = rays_o[frame_idx], rays_d[frame_idx], light_d[frame_idx], time_steps[0,frame_idx].item() nears, fars = raymarching.near_far_from_aabb(_rays_o, _rays_d, self.aabb_train if self.training else self.aabb_infer) # allocate outputs dtype = torch.float32 weights_sum = torch.zeros(N, dtype=dtype, device=device) depth = torch.zeros(N, dtype=dtype, device=device) image = torch.zeros(N, 3, dtype=dtype, device=device) n_alive = N rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N] rays_t = nears.clone() # [N] ## test must use the same step = 0 while step < self.opt.max_steps: # hard coded max step # count alive rays n_alive = rays_alive.shape[0] # exit loop if n_alive <= 0: break # decide compact_steps n_step = max(min(N // n_alive, 8), 1) xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, _rays_o, _rays_d, self.bound, self.density_bitfield[t], self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps) dirs = safe_normalize(dirs) s_time = torch.zeros((xyzs.size(0),1), device=xyzs.device, dtype=xyzs.dtype) + time[0, frame_idx].item() # ipdb.set_trace() sigmas, rgbs, normals, _ = self(xyzs, dirs, _light_d[:1], ratio=ambient_ratio, shading=shading, t=s_time) sigmas = self.density_scale * sigmas raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize) rays_alive = rays_alive[rays_alive >= 0] #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}') step += n_step image_all.append(image) weights_sum_all.append(weights_sum) depth_all.append(depth) # ipdb.set_trace() weights_sum = torch.stack(weights_sum_all, dim=0) # F,N depth = torch.stack(depth_all, dim=0) # F,N image = torch.stack(image_all, dim=0) # F,N,3 # mix background color ## when bg_radius < 0 -> the way Magic123, during training, bg_color is always a random color, during inference, always 1 # ipdb.set_trace() if bg_color is None: if self.opt.bg_radius > 0 and self.bg_net is not None: # use the bg model to calculate bg_color ## NOTE here the camera should be fixed in the video ## rays_d F,N,3 bg_color = self.background(rays_d.reshape(-1, 3)) # [FN, 3] # this is irrelavant to time bg_color = bg_color.reshape(batch_size, num_frames, -1, 3) # F,N,3 else: bg_color = 1 image_wo_bg = image.view(batch_size, num_frames, -1, 3) image = image + (1 - weights_sum).unsqueeze(-1) * bg_color image = image.view(batch_size, num_frames, -1, 3) depth = depth.view(batch_size, num_frames, -1) weights_sum = weights_sum.reshape(batch_size, num_frames, -1) results['image'] = image # B,F,N,3 results['depth'] = depth # B,F,N results['weights_sum'] = weights_sum # B,F,N results['image_wo_bg'] = image_wo_bg # B,F,N,3 # ipdb.set_trace() return results @torch.no_grad() def update_extra_state(self, decay=0.95, S=128): # call before each epoch to update extra states. 
if not (self.cuda_ray): return if self.taichi_ray: raise NotImplementedError ### update density grid tmp_grid = - torch.ones_like(self.density_grid) # full update. # if self.iter_density < 16: # update only 16 times if True: # full update X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S) Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S) Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S) for t, time in enumerate(self.times): for xs in X: for ys in Y: for zs in Z: # construct points
logger = logging.getLogger(__name__) class DNeRFRenderer(NeRFRenderer): def __init__(self, opt): super().__init__(opt) self.time_size = opt.get("time_size", 1) self.density_scale = opt.get("density_scale", 1) self.dynamic_ft = opt.get("dynamic_ft", False) # extra state for cuda raymarching if self.cuda_ray: # density grid (with an extra time dimension) density_grid = torch.zeros(self.time_size, self.cascade, self.grid_size ** 3) # [T, CAS, H * H * H] density_bitfield = torch.zeros(self.time_size, self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [T, CAS * H * H * H // 8] self.register_buffer('density_grid', density_grid) self.register_buffer('density_bitfield', density_bitfield) self.mean_density = 0 self.iter_density = 0 # time stamps for density grid times = ((torch.arange(self.time_size, dtype=torch.float32) + 0.5) / self.time_size).view(-1, 1, 1) # [T, 1, 1] self.register_buffer('times', times) # step counter step_counter = torch.zeros(16, 2, dtype=torch.int32) # 16 is hardcoded for averaging... self.register_buffer('step_counter', step_counter) self.mean_count = 0 self.local_step = 0 def run_cuda(self, rays_o, rays_d, light_d=None, ambient_ratio=1.0, shading='albedo', bg_color=None, perturb=False, T_thresh=1e-4, binarize=False, time=None, **kwargs): # rays_o, rays_d: [B, N, 3] / B,F,N,3 # return: image: [B, N, 3], depth: [B, N] # time: [B,F] prefix = rays_o.shape[:-1] batch_size = prefix[0] if prefix[0] != 1: raise "The prefix should be 1 if different frames has different camera pose in the current version" dynamic_cam = True if rays_o.ndim == 4 else False N = rays_o.shape[:-1].numel() # B * N, in fact device = rays_o.device if light_d is None: # gaussian noise around the ray origin, so the light always face the view dir (avoid dark face) light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) # [B,N,3] / B,F,N,3 if time is None: assert not self.dynamic_ft time_steps = torch.LongTensor([[0]]).reshape(1,1) # 1,1 time = torch.FloatTensor([[0]]).reshape(1,1) # 1,1 num_frames = 1 else: time_steps = torch.floor(time * self.time_size).clamp(min=0, max=self.time_size - 1).long() # B,F num_frames = time.size(1) if dynamic_cam: rays_o = rays_o[0].contiguous() # F,N,3 rays_d = rays_d[0].contiguous() # F,N,3 light_d = light_d[0].contiguous() # F,N,3 else: rays_o = rays_o.repeat(num_frames, 1, 1).contiguous() rays_d = rays_d.repeat(num_frames, 1, 1).contiguous() light_d = light_d.repeat(num_frames, 1, 1).contiguous() # ipdb.set_trace() results = {} # ipdb.set_trace() if self.training: # ipdb.set_trace() v_xyzs = [] v_dirs = [] v_light = [] v_time = [] v_idx = [0] v_rays = [] v_ts = [] v_kernels = [] for frame_idx in range(num_frames): _rays_o, _rays_d, _light_d, t = rays_o[frame_idx], rays_d[frame_idx], light_d[frame_idx], time_steps[0,frame_idx].item() ## N,3 for the first 3, t is a value # pre-calculate near far nears, fars = raymarching.near_far_from_aabb(_rays_o, _rays_d, self.aabb_train if self.training else self.aabb_infer) xyzs, dirs, ts, rays = raymarching.march_rays_train(_rays_o, _rays_d, self.bound, self.density_bitfield[t], self.cascade, self.grid_size, nears, fars, perturb, self.opt.dt_gamma, self.opt.max_steps) dirs = safe_normalize(dirs) flatten_rays = raymarching.flatten_rays(rays, xyzs.shape[0]).long() if _light_d.shape[0] > 1: _light_d = _light_d[flatten_rays] else: # 1,3 _light_d = _light_d.repeat(xyzs.size(0), 1) v_xyzs.append(xyzs) v_dirs.append(dirs) v_light.append(_light_d) sample_num = xyzs.size(0) v_time.append(torch.zeros((sample_num,1), 
device=xyzs.device, dtype=xyzs.dtype)+time[0, frame_idx].item()) # NOTE this should be real time # N v_idx.append(sample_num) v_rays.append(rays) v_ts.append(ts) v_xyzs = torch.cat(v_xyzs, dim=0) v_dirs = torch.cat(v_dirs, dim=0) v_light = torch.cat(v_light, dim=0) v_time = torch.cat(v_time, dim=0) v_idx = np.cumsum(v_idx).tolist() sigmas, rgbs, normals, deforms = self(v_xyzs, v_dirs, v_light, ratio=ambient_ratio, shading=shading, t=v_time) sigmas = self.density_scale * sigmas weights = [] weights_sum = [] depth = [] image = [] normal_image = [] # ipdb.set_trace() for frame_idx in range(num_frames): start_idx, end_idx = v_idx[frame_idx], v_idx[frame_idx+1] # _weights, _weights_sum, _depth, _image out = raymarching.composite_rays_train(sigmas[start_idx:end_idx], rgbs[start_idx:end_idx], v_ts[frame_idx], v_rays[frame_idx], T_thresh, binarize) if normals is not None: _, _, _, _normal_image = raymarching.composite_rays_train(sigmas[start_idx:end_idx].detach(), (normals[start_idx:end_idx] + 1) / 2, v_ts[frame_idx], v_rays[frame_idx], T_thresh, binarize) normal_image.append(_normal_image) weights.append(out[0]) weights_sum.append(out[1]) depth.append(out[2]) image.append(out[3]) # N,3 weights = torch.cat(weights) weights_sum = torch.stack(weights_sum, dim=0) # F,N depth = torch.stack(depth, dim=0) # F,N image = torch.stack(image, dim=0) # F,N,3 # normals related regularizations if self.opt.lambda_orient > 0 and normals is not None: # orientation loss loss_orient = weights.detach() * (normals * v_dirs).sum(-1).clamp(min=0) ** 2 results['loss_orient'] = loss_orient.mean() if self.opt.lambda_3d_normal_smooth > 0 and normals is not None: normals_perturb = self.normal(v_xyzs + torch.randn_like(v_xyzs) * 1e-2, t=v_time) results['loss_normal_perturb'] = (normals - normals_perturb).abs().mean() if normals is not None: # _, _, _, normal_image = raymarching.composite_rays_train(sigmas.detach(), (normals + 1) / 2, ts, rays, T_thresh, binarize) # results['normal_image'] = normal_image.view(*prefix, 3) normal_image = torch.stack(normal_image, dim=0) # F,N,3 results['normal_image'] = normal_image.view(batch_size, num_frames, -1, 3) # B,F,N,3 # weights normalization results['weights'] = weights # N'*F else: image_all = [] weights_sum_all = [] depth_all = [] # ipdb.set_trace() for frame_idx in range(num_frames): _rays_o, _rays_d, _light_d, t = rays_o[frame_idx], rays_d[frame_idx], light_d[frame_idx], time_steps[0,frame_idx].item() nears, fars = raymarching.near_far_from_aabb(_rays_o, _rays_d, self.aabb_train if self.training else self.aabb_infer) # allocate outputs dtype = torch.float32 weights_sum = torch.zeros(N, dtype=dtype, device=device) depth = torch.zeros(N, dtype=dtype, device=device) image = torch.zeros(N, 3, dtype=dtype, device=device) n_alive = N rays_alive = torch.arange(n_alive, dtype=torch.int32, device=device) # [N] rays_t = nears.clone() # [N] ## test must use the same step = 0 while step < self.opt.max_steps: # hard coded max step # count alive rays n_alive = rays_alive.shape[0] # exit loop if n_alive <= 0: break # decide compact_steps n_step = max(min(N // n_alive, 8), 1) xyzs, dirs, ts = raymarching.march_rays(n_alive, n_step, rays_alive, rays_t, _rays_o, _rays_d, self.bound, self.density_bitfield[t], self.cascade, self.grid_size, nears, fars, perturb if step == 0 else False, self.opt.dt_gamma, self.opt.max_steps) dirs = safe_normalize(dirs) s_time = torch.zeros((xyzs.size(0),1), device=xyzs.device, dtype=xyzs.dtype) + time[0, frame_idx].item() # ipdb.set_trace() sigmas, rgbs, normals, _ = 
self(xyzs, dirs, _light_d[:1], ratio=ambient_ratio, shading=shading, t=s_time) sigmas = self.density_scale * sigmas raymarching.composite_rays(n_alive, n_step, rays_alive, rays_t, sigmas, rgbs, ts, weights_sum, depth, image, T_thresh, binarize) rays_alive = rays_alive[rays_alive >= 0] #print(f'step = {step}, n_step = {n_step}, n_alive = {n_alive}, xyzs: {xyzs.shape}') step += n_step image_all.append(image) weights_sum_all.append(weights_sum) depth_all.append(depth) # ipdb.set_trace() weights_sum = torch.stack(weights_sum_all, dim=0) # F,N depth = torch.stack(depth_all, dim=0) # F,N image = torch.stack(image_all, dim=0) # F,N,3 # mix background color ## when bg_radius < 0 -> the way Magic123, during training, bg_color is always a random color, during inference, always 1 # ipdb.set_trace() if bg_color is None: if self.opt.bg_radius > 0 and self.bg_net is not None: # use the bg model to calculate bg_color ## NOTE here the camera should be fixed in the video ## rays_d F,N,3 bg_color = self.background(rays_d.reshape(-1, 3)) # [FN, 3] # this is irrelavant to time bg_color = bg_color.reshape(batch_size, num_frames, -1, 3) # F,N,3 else: bg_color = 1 image_wo_bg = image.view(batch_size, num_frames, -1, 3) image = image + (1 - weights_sum).unsqueeze(-1) * bg_color image = image.view(batch_size, num_frames, -1, 3) depth = depth.view(batch_size, num_frames, -1) weights_sum = weights_sum.reshape(batch_size, num_frames, -1) results['image'] = image # B,F,N,3 results['depth'] = depth # B,F,N results['weights_sum'] = weights_sum # B,F,N results['image_wo_bg'] = image_wo_bg # B,F,N,3 # ipdb.set_trace() return results @torch.no_grad() def update_extra_state(self, decay=0.95, S=128): # call before each epoch to update extra states. if not (self.cuda_ray): return if self.taichi_ray: raise NotImplementedError ### update density grid tmp_grid = - torch.ones_like(self.density_grid) # full update. # if self.iter_density < 16: # update only 16 times if True: # full update X = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S) Y = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S) Z = torch.arange(self.grid_size, dtype=torch.int32, device=self.density_bitfield.device).split(S) for t, time in enumerate(self.times): for xs in X: for ys in Y: for zs in Z: # construct points
xx, yy, zz = custom_meshgrid(xs, ys, zs)
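The gold next line above builds the per-axis meshgrid inside the density-grid update loop. For context, the sketch below mirrors the update_extra_state code included earlier in this record: the meshgrid axes are flattened into integer grid coordinates, Morton-encoded via the repository's raymarching CUDA extension (left as a comment here so the sketch stays runnable on CPU), and rescaled to [-1, 1] query points for the density network. Treat it as an illustrative reconstruction, not the record's exact code.

# Hedged sketch of the coordinate handling that follows the gold next line,
# mirroring the update_extra_state code shown earlier in this record.
import torch

grid_size = 128  # matches the self.grid_size-sized torch.arange(...).split(S) chunks

xs = torch.arange(grid_size, dtype=torch.int32)
ys = torch.arange(grid_size, dtype=torch.int32)
zs = torch.arange(grid_size, dtype=torch.int32)

# stand-in for custom_meshgrid(xs, ys, zs)
xx, yy, zz = torch.meshgrid(xs, ys, zs, indexing="ij")

# [N, 3] integer grid coordinates in [0, grid_size)
coords = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1)

# In the actual code the coordinates are Morton-encoded to index the density grid:
#   indices = raymarching.morton3D(coords).long()

# [N, 3] query points rescaled to [-1, 1], ready for the density network
xyzs = 2 * coords.float() / (grid_size - 1) - 1
print(coords.shape, xyzs.min().item(), xyzs.max().item())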
0
2023-11-23 10:34:08+00:00
24k
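Taken together, this record pairs a code prefix (ending at the "# construct points" comment) with the gold next line xx, yy, zz = custom_meshgrid(xs, ys, zs) and a gold snippet index of 0 into the context list. The sketch below shows one way such a record might be scored against a model completion; the exact-match and character-similarity metrics are illustrative assumptions, not the dataset's official evaluation.

# Hedged sketch: scoring a predicted completion against this record's gold next line.
# The metrics here are illustrative assumptions, not an official protocol.
from difflib import SequenceMatcher


def score_completion(predicted_line: str, gold_line: str) -> dict:
    pred = predicted_line.strip()
    gold = gold_line.strip()
    return {
        "exact_match": pred == gold,
        "similarity": SequenceMatcher(None, pred, gold).ratio(),
    }


gold = "xx, yy, zz = custom_meshgrid(xs, ys, zs)"
print(score_completion("xx, yy, zz = torch.meshgrid(xs, ys, zs)", gold))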
alexzhou907/DreamPropeller
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self, *args, **kwargs) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid 
only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise 
ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self, *args, **kwargs) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n \n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n # density = self.density_act(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = 
self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
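The `scale_tensor` snippet that closes the context list above linearly remaps values from an input range to a target range (typically from a bounding box into [0, 1] before hash-grid encoding). Below is a minimal, self-contained sketch that mirrors that logic; it is an illustration, not the repository's implementation, and the bounding-box values are hypothetical.

```python
import torch

def scale_tensor(dat, inp_scale, tgt_scale):
    # Linearly remap `dat` from inp_scale=(lo, hi) to tgt_scale=(lo, hi);
    # None defaults to (0, 1), mirroring the snippet above.
    if inp_scale is None:
        inp_scale = (0, 1)
    if tgt_scale is None:
        tgt_scale = (0, 1)
    dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])
    return dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]

# Hypothetical use: map points from a [-1, 1] bounding box to [0, 1] before
# feeding them to a positional encoding that expects unit-cube inputs.
points = torch.rand(8, 3) * 2.0 - 1.0
unit_points = scale_tensor(points, (-1.0, 1.0), (0.0, 1.0))
assert unit_points.min() >= 0.0 and unit_points.max() <= 1.0
```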
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
14,680
(self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf(
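The cropped_code field above initializes the SDF grid from analytic shapes (sphere, ellipsoid) or a mesh. The following standalone sketch reproduces only the analytic pseudo-SDF closures; the radius and size parameters are illustrative values, not the record's configuration.

```python
import torch

def sphere_sdf(points: torch.Tensor, radius: float = 0.5) -> torch.Tensor:
    # Signed distance to a sphere centered at the origin: |p| - r.
    return points.norm(dim=-1, keepdim=True) - radius

def ellipsoid_pseudo_sdf(points: torch.Tensor, size) -> torch.Tensor:
    # Pseudo signed distance of an axis-aligned ellipsoid, as in the snippet:
    # sqrt(sum((p / size)^2)) - 1, negative inside and positive outside.
    size = torch.as_tensor(size, dtype=points.dtype)
    return ((points / size) ** 2).sum(dim=-1, keepdim=True).sqrt() - 1.0

# Illustrative check with hypothetical parameters.
pts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
print(sphere_sdf(pts, radius=0.5))                 # [[-0.5], [0.5]]
print(ellipsoid_pseudo_sdf(pts, (0.9, 0.6, 0.3)))  # negative at origin, positive outside
```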
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self,*args, **kwargs) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): 
raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf(
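The mesh branch of the shape initialization in the all_code field above re-orients an input mesh by building an orthonormal basis from user-specified up/front axes and inverting it. Here is a hedged NumPy-only sketch of that change-of-basis step; the direction strings in the example are assumptions for illustration.

```python
import numpy as np

DIR2VEC = {
    "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]),
    "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]),
}

def mesh_to_std_rotation(up: str = "+z", front: str = "+x") -> np.ndarray:
    # Rotation taking mesh-frame coordinates (given up/front axes) into the
    # standard frame (z up, x front) used by the snippet above.
    z_, x_ = DIR2VEC[up], DIR2VEC[front]
    y_ = np.cross(z_, x_)                        # completes a right-handed basis
    std2mesh = np.stack([x_, y_, z_], axis=0).T  # standard -frame -> mesh-frame coords
    return np.linalg.inv(std2mesh)               # mesh-frame -> standard-frame coords

# Example: a mesh authored with +y as up and +z as front.
mesh2std = mesh_to_std_rotation(up="+y", front="+z")
verts = np.random.rand(4, 3)
aligned = (mesh2std @ verts.T).T                 # same operation as the snippet's np.dot
```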
scale_tensor(
10
2023-11-27 23:39:49+00:00
24k
abdulhaim/LMRL-Gym
llm_rl_scripts/chess/ilql/train_full_games_ilql.py
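The context snippets that follow this file path implement ILQL training for the chess task; for orientation, the `ilql_loss` snippet below combines a TD regression for the Q heads, an expectile regression for the value head, and a CQL-style cross-entropy regularizer. The JAX sketch here shows those three terms on already-selected per-action values only; it omits the masking, query-indicator selection, and twin Q heads of the real loss, and its shapes and weights are illustrative rather than the script's configuration.

```python
import jax
import jax.numpy as jnp
import optax

def ilql_terms(q, v, v_next, target_q, q_logits, token_ids, reward,
               gamma=0.99, tau=0.7, cql_weight=0.01):
    # TD regression: pull Q(s, a) toward r + gamma * V(s').
    td_target = jax.lax.stop_gradient(reward + gamma * v_next)
    q_loss = optax.l2_loss(q, td_target).mean()

    # Expectile regression: V tracks an upper expectile of the target Q values.
    diff = jax.lax.stop_gradient(target_q) - v
    weight = jnp.where(diff >= 0, tau, 1.0 - tau)
    v_loss = (weight * diff ** 2).mean()

    # CQL-style regularizer: keep the Q head's logits close to the data actions.
    cql_loss = optax.softmax_cross_entropy_with_integer_labels(q_logits, token_ids).mean()

    return q_loss + v_loss + cql_weight * cql_loss

# Illustrative shapes: 4 action tokens, vocabulary of 8.
key = jax.random.PRNGKey(0)
loss = ilql_terms(
    q=jnp.zeros(4), v=jnp.zeros(4), v_next=jnp.zeros(4), target_q=jnp.zeros(4),
    q_logits=jax.random.normal(key, (4, 8)), token_ids=jnp.array([1, 2, 3, 0]),
    reward=jnp.ones(4),
)
```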
[ { "identifier": "ilql_loss", "path": "LLM_RL/algorithms/ilql/base_interface.py", "snippet": "def ilql_loss(\n q1: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n q2: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n v: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n v_final: jax.Array, # [batch]\n target_q1: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n target_q2: jax.Array, # [batch, time-1] output is masked; shift x[:-1]\n q1_logits: jax.Array, # [batch, time-1, vocab] output is masked; shift x[:-1]\n q2_logits: jax.Array, # [batch, time-1, vocab] output is masked; shift x[:-1]\n token_ids: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n attention_mask: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n should_take_action: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n rewards: jax.Array, # [batch, time-1] output is masked; shift x[1:]\n *, \n gamma: Union[float, jax.Array], \n tau: Union[float, jax.Array], \n cql_weight: Union[float, jax.Array], \n) -> Tuple[jnp.ndarray, Any]:\n # should be an action in the batch\n mask = should_take_action.astype(jnp.float32) * attention_mask\n n = mask.sum()\n \n q1sa_flat, q2sa_flat, v_flat = q1.reshape(-1), q2.reshape(-1), v.reshape(-1)\n target_q1sa_flat, target_q2sa_flat = target_q1.reshape(-1), target_q2.reshape(-1)\n vns_flat = jnp.concatenate((v, v_final[..., None]), axis=1).reshape(-1)\n\n qv_query_indicators = get_query_indicators(should_take_action.reshape(-1))\n\n is_next_state = should_take_action.copy()\n # set first action position to false\n is_next_state = is_next_state.at[jnp.arange(0, is_next_state.shape[0], dtype=jnp.int32), jnp.argmax(is_next_state.astype(jnp.int32), axis=1)].set(False)\n # set endpoint to true as long as there is at least 1 action in the sequence\n is_next_state = jnp.concatenate((is_next_state, (should_take_action.sum(axis=1) > 0)[..., None]), axis=1)\n\n vns_query_indicators = get_query_indicators(is_next_state.reshape(-1))\n # should be the same number of vns as qv, so we can clip the extra padding to match shape\n vns_query_indicators = vns_query_indicators[:qv_query_indicators.shape[0], :]\n \n # extract selected values\n q1sa_selected = (qv_query_indicators * q1sa_flat).sum(axis=1)\n q2sa_selected = (qv_query_indicators * q2sa_flat).sum(axis=1)\n v_selected = (qv_query_indicators * v_flat).sum(axis=1)\n target_q1sa_selected = (qv_query_indicators * target_q1sa_flat).sum(axis=1)\n target_q2sa_selected = (qv_query_indicators * target_q2sa_flat).sum(axis=1)\n vns_selected = (vns_query_indicators * vns_flat).sum(axis=1)\n rs_selected = (qv_query_indicators * rewards.reshape(-1)).sum(axis=1)\n\n # get masks for selected values\n sa_mask = (qv_query_indicators.sum(axis=1) > 0).astype(jnp.float32)\n ns_mask = (vns_query_indicators.sum(axis=1) > 0).astype(jnp.float32)\n\n # compute q loss\n q1_loss = (optax.l2_loss(q1sa_selected, jax.lax.stop_gradient(rs_selected + gamma * vns_selected)) * sa_mask).sum() / n\n q2_loss = (optax.l2_loss(q2sa_selected, jax.lax.stop_gradient(rs_selected + gamma * vns_selected)) * sa_mask).sum() / n\n\n # compute v loss\n target_q_selected = jnp.minimum(target_q1sa_selected, target_q2sa_selected)\n expectile_indicator = (target_q_selected >= v_selected).astype(jnp.float32)\n expectile_weights = expectile_indicator * tau + (1 - expectile_indicator) * (1 - tau)\n v_loss = (optax.l2_loss(v_selected, jax.lax.stop_gradient(target_q_selected)) * jax.lax.stop_gradient(expectile_weights) * 
sa_mask).sum() / n\n\n # compute cql loss on both q heads\n q1_cql_loss = optax.softmax_cross_entropy_with_integer_labels(q1_logits, token_ids)\n q1_cql_loss = (mask * q1_cql_loss).sum() / n\n\n q2_cql_loss = optax.softmax_cross_entropy_with_integer_labels(q2_logits, token_ids)\n q2_cql_loss = (mask * q2_cql_loss).sum() / n\n \n loss = q1_loss + q2_loss + v_loss + cql_weight * (q1_cql_loss + q2_cql_loss)\n\n logs = dict(\n losses=dict(\n total_loss=loss, \n q1_loss=q1_loss, \n q2_loss=q2_loss, \n v_loss=v_loss, \n q1_cql_loss=q1_cql_loss, \n q2_cql_loss=q2_cql_loss, \n ), \n q1=get_tensor_stats(q1sa_selected, mask=sa_mask, n=n), \n q2=get_tensor_stats(q2sa_selected, mask=sa_mask, n=n), \n v=get_tensor_stats(v_selected, mask=sa_mask, n=n), \n target_q=get_tensor_stats(target_q_selected, mask=sa_mask, n=n), \n target_q1=get_tensor_stats(target_q1sa_selected, mask=sa_mask, n=n), \n target_q2=get_tensor_stats(target_q2sa_selected, mask=sa_mask, n=n), \n vns=get_tensor_stats(vns_selected, mask=ns_mask, n=n), \n v_final=get_tensor_stats(v_final, mask=jnp.ones(v_final.shape, dtype=jnp.int32), n=v_final.shape[0]), \n rewards=get_tensor_stats(rewards, mask=mask, n=n), \n )\n\n return loss, logs" }, { "identifier": "Text", "path": "LLM_RL/environment.py", "snippet": "class Text:\n text: str\n is_action: bool" }, { "identifier": "text_env_eval", "path": "LLM_RL/environment.py", "snippet": "def text_env_eval(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n n_rollouts: int, \n initial_text_history: Optional[TextHistory]=None, # only allow one initial_text_history here\n seed_generator: Optional[Iterator[int]]=None, \n env_options: Optional[Dict]=None, # only allow one env_options here\n interaction_callback: Optional[Callable[[List[Tuple[TextHistory, TextHistory, TextHistory, float, bool]]], None]]=None, \n bsize: int=1, \n verbose: bool=True, \n) -> Tuple[List[List[InteractionTransition]], Dict[str, Any]]:\n interactions, rewards, dones, eps_lengths = [], [], [], []\n for _ in tqdm(range((n_rollouts+(bsize-1))//bsize), disable=not verbose):\n actual_bsize = min(n_rollouts-len(interactions), bsize)\n npad = bsize - actual_bsize\n interaction_batch = interact_environment(\n env, \n policy, \n initial_text_history=initial_text_history, \n env_seed=[None]*actual_bsize if seed_generator is None else [next(seed_generator) for _ in range(actual_bsize)], \n env_options=[env_options]*actual_bsize, \n bsize=actual_bsize,\n npad=npad,\n )\n \n for interaction in interaction_batch:\n interactions.append(interaction)\n rewards.append(sum(map(lambda x: x.reward, interaction)))\n dones.append(interaction[-1].done)\n eps_lengths.append(len(interaction))\n if interaction_callback is not None:\n interaction_callback(interaction)\n \n rewards = np.asarray(rewards, dtype=np.float32)\n dones = np.asarray(dones, dtype=np.float32)\n results_summary = dict(\n reward=dict(\n mean=np.mean(rewards), \n std=np.std(rewards), \n min=np.min(rewards), \n max=np.max(rewards), \n ), \n done=dict(\n mean=np.mean(dones), \n std=np.std(dones), \n min=np.min(dones), \n max=np.max(dones), \n ), \n length=dict(\n mean=np.mean(eps_lengths),\n std=np.std(eps_lengths),\n min=np.min(eps_lengths),\n max=np.max(eps_lengths),\n ),\n )\n \n return interactions, results_summary" }, { "identifier": "TextTrajectory", "path": "LLM_RL/environment.py", "snippet": "class TextTrajectory:\n text_history: TextHistory\n reward: Tuple[float, ...]\n done: bool\n\n def __post_init__(self):\n assert len(self.reward) == 
len(self.text_history), \"reward is needed for each text\"\n assert all([r == 0.0 for r, t in zip(self.reward, self.text_history) if not t.is_action]), \"reward for non-actions texts should be 0.0\"" }, { "identifier": "TextTrajectoryChain", "path": "LLM_RL/environment.py", "snippet": "class TextTrajectoryChain:\n text_trajectory: TextTrajectory\n next: Optional[TextTrajectoryChain]" }, { "identifier": "TokenTrajectoryChain", "path": "LLM_RL/environment.py", "snippet": "class TokenTrajectoryChain:\n token_trajectory: TokenTrajectory\n next: Optional[TokenTrajectoryChain]\n\n def __post_init__(self):\n curr, dones = self, []\n while curr.next is not None:\n dones.append(curr.token_trajectory.done)\n curr = curr.next\n assert not np.any(dones[:-1]), 'token trajectory chain can only be done at the end'\n \n def to_list(self) -> List[TokenTrajectory]:\n curr, l = self, []\n while curr is not None:\n l.append(curr.token_trajectory)\n curr = curr.next\n return l\n\n @classmethod\n def from_text_trajectory_chain(\n cls, \n text_trajectory_chain: TextTrajectoryChain, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenTrajectoryChain:\n return TokenTrajectoryChain(\n TokenTrajectory.from_text_trajectory(\n text_trajectory_chain.text_trajectory, \n tokenizer, \n token_process=token_process, \n ), \n cls.from_text_trajectory_chain(\n text_trajectory_chain.next, \n tokenizer, \n token_process=token_process, \n ) if text_trajectory_chain.next is not None else None, \n )" }, { "identifier": "GPT2ILQLTrain", "path": "LLM_RL/algorithms/ilql/gpt2/interface.py", "snippet": "class GPT2ILQLTrain(ILQLTrain):\n @classmethod\n def load_train(\n cls, \n base_train_state: TrainState, \n target_base_params: Optional[PyTree], \n q1_head_train_state: TrainState, \n q2_head_train_state: TrainState, \n v_head_train_state: TrainState, \n q1_target_head_params: PyTree, \n q2_target_head_params: PyTree, \n base_model: FlaxPreTrainedModel, \n q_head_model: nn.Module, \n v_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Callable, \n detach_q1: bool, \n detach_q2: bool, \n detach_v: bool, \n polyak_alpha: float, \n hard_update_every: Optional[int], \n ):\n mesh = base_model.config.mesh\n assert mesh is not None\n assert mesh == q_head_model.config.mesh\n assert mesh == v_head_model.config.mesh\n base_train_state_partition_spec = match_partition_rules(base_model.config.get_partition_rules(), base_train_state)\n target_base_params_partition_spec = PS() if target_base_params is None else match_partition_rules(base_model.config.get_partition_rules(), target_base_params)\n q1_head_train_state_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q1_head_train_state)\n q2_head_train_state_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q2_head_train_state)\n v_head_train_state_partition_spec = match_partition_rules(v_head_model.config.get_partition_rules(), v_head_train_state)\n q1_target_head_params_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q1_target_head_params)\n q2_target_head_params_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q2_target_head_params)\n\n @partial(\n pjit, \n donate_argnums=(0, 1, 2, 3, 4, 5, 6), \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: 
NamedSharding(mesh, ps), target_base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), v_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_target_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_target_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), target_base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), v_head_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_target_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_target_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _step(\n base_train_state: TrainState, \n target_base_params: Optional[PyTree], \n q1_head_train_state: TrainState, \n q2_head_train_state: TrainState, \n v_head_train_state: TrainState, \n q1_target_head_params: PyTree, \n q2_target_head_params: PyTree, \n\n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n rewards: jax.Array, \n dones: jax.Array, \n\n next_token_ids: Optional[jax.Array], \n next_tokens_attention_mask: Optional[jax.Array], \n next_tokens_position_ids: Optional[jax.Array], \n next_dones: Optional[jax.Array], \n\n prng_key: Optional[jax.random.PRNGKeyArray], \n train: bool=True, \n ) -> Tuple[TrainState, Optional[PyTree], TrainState, TrainState, TrainState, PyTree, PyTree, jax.Array, PyTree]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS(('dp', 'fsdp'), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS(('dp', 'fsdp'), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS(('dp', 'fsdp'), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS(('dp', 'fsdp'), None))\n rewards = with_named_sharding_constraint(rewards, mesh, PS(('dp', 'fsdp'), None))\n dones = with_named_sharding_constraint(dones, mesh, PS(('dp', 'fsdp')))\n if next_token_ids is not None:\n assert next_tokens_attention_mask is not None\n assert next_tokens_position_ids is not None\n next_token_ids = with_named_sharding_constraint(next_token_ids, mesh, PS(('dp', 'fsdp'), None))\n next_tokens_attention_mask = with_named_sharding_constraint(next_tokens_attention_mask, mesh, PS(('dp', 'fsdp'), None))\n next_tokens_position_ids = with_named_sharding_constraint(next_tokens_position_ids, mesh, PS(('dp', 'fsdp'), None))\n next_dones = 
with_named_sharding_constraint(next_dones, mesh, PS(('dp', 'fsdp')))\n else:\n assert next_tokens_attention_mask is None\n assert next_tokens_position_ids is None\n\n # define loss function\n\n def grad_loss(base_params: PyTree, q1_head_params: PyTree, q2_head_params: PyTree, v_head_params: PyTree, prng_key: jax.random.PRNGKeyArray):\n \n # get base hidden states\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_model_output = base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n\n if target_base_params is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_base_model_output = base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=target_base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n else:\n target_base_model_output = base_model_output\n \n if next_token_ids is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n next_token_base_model_output = base_model(\n input_ids=next_token_ids, \n attention_mask=next_tokens_attention_mask, \n position_ids=next_tokens_position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n \n # get values\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q1_head_output = q_head_model.apply(\n {'params': q1_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q2_head_output = q_head_model.apply(\n {'params': q2_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n v_head_output = v_head_model.apply(\n {'params': v_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_q1_head_output = q_head_model.apply(\n {'params': q1_target_head_params}, \n target_base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_q2_head_output = q_head_model.apply(\n {'params': q2_target_head_params}, \n target_base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n # stop gradients\n if detach_q1:\n q1_head_output = jax.lax.stop_gradient(q1_head_output)\n if detach_q2:\n q2_head_output = jax.lax.stop_gradient(q2_head_output)\n if detach_v:\n v_head_output = jax.lax.stop_gradient(v_head_output)\n target_q1_head_output = jax.lax.stop_gradient(target_q1_head_output)\n target_q2_head_output = jax.lax.stop_gradient(target_q2_head_output)\n\n q1 = jnp.take_along_axis(q1_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n q2 = jnp.take_along_axis(q2_head_output[:, :-1], input_ids[:, 1:][..., None], 
axis=2).squeeze(2)\n v = v_head_output[:, :-1].squeeze(2)\n v_full = v_head_output.squeeze(2)\n target_q1 = jnp.take_along_axis(target_q1_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n target_q2 = jnp.take_along_axis(target_q2_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n\n q1_logits = q1_head_output[:, :-1, :].astype(jnp.float32)\n q2_logits = q2_head_output[:, :-1, :].astype(jnp.float32)\n\n # get next token values\n\n if next_token_ids is not None:\n # just run vf on last token to save some flops\n last_next_token_idxs = (next_tokens_attention_mask.shape[1]-1)-jnp.argmax(jnp.flip(next_tokens_attention_mask, axis=1).astype(jnp.int32), axis=1)\n final_next_token_h = next_token_base_model_output.hidden_states[-1][jnp.arange(0, input_ids.shape[0], dtype=jnp.int32), last_next_token_idxs, :]\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n next_token_v_head_output = v_head_model.apply(\n {'params': v_head_params}, \n final_next_token_h, \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n ).squeeze(1)\n v_final = next_token_v_head_output * (1 - next_dones.astype(jnp.float32))\n else:\n last_action_idxs = (should_take_action.shape[1]-1)-jnp.argmax(jnp.flip(should_take_action, axis=1).astype(jnp.int32), axis=1)+1\n last_token_idxs = (attention_mask.shape[1]-1)-jnp.argmax(jnp.flip(attention_mask, axis=1).astype(jnp.int32), axis=1)\n final_state_idxs = ((1 - dones) * last_action_idxs + dones * last_token_idxs).astype(jnp.int32)\n v_final = v_full[jnp.arange(0, should_take_action.shape[0], dtype=jnp.int32), final_state_idxs]\n v_final = v_final * (1 - dones)\n v_final = jax.lax.stop_gradient(v_final)\n\n loss, info = loss_fn(\n q1, \n q2, \n v, \n v_final, \n target_q1, \n target_q2, \n q1_logits, \n q2_logits, \n input_ids[:, 1:], \n attention_mask[:, 1:], \n should_take_action, \n rewards, \n )\n return loss, info\n\n # take loss\n (loss, info), (base_grads, q1_head_grads, q2_head_grads, v_head_grads) = jax.value_and_grad(grad_loss, has_aux=True, argnums=(0, 1, 2, 3))(\n base_train_state.params, \n q1_head_train_state.params, \n q2_head_train_state.params, \n v_head_train_state.params, \n prng_key, \n )\n # assert shard gradients\n base_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n base_grads, \n base_train_state_partition_spec.params, \n )\n q1_head_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n q1_head_grads, \n q1_head_train_state_partition_spec.params, \n )\n q2_head_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n q2_head_grads, \n q2_head_train_state_partition_spec.params, \n )\n v_head_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n v_head_grads, \n v_head_train_state_partition_spec.params, \n )\n # update params and optim state\n base_train_state = base_train_state.apply_gradients(grads=base_grads)\n q1_head_train_state = q1_head_train_state.apply_gradients(grads=q1_head_grads)\n q2_head_train_state = q2_head_train_state.apply_gradients(grads=q2_head_grads)\n v_head_train_state = v_head_train_state.apply_gradients(grads=v_head_grads)\n\n # handle target network updates\n def update_targets(params: PyTree, base_params: PyTree, steps: jnp.ndarray) -> PyTree:\n base_params = optax.incremental_update(params, base_params, polyak_alpha)\n if hard_update_every is not None:\n 
base_params = optax.periodic_update(params, base_params, steps, hard_update_every)\n return base_params\n \n def mid_targets(params: PyTree, base_params: PyTree, steps: jnp.ndarray) -> PyTree:\n return base_params\n\n def update_cond(opt_state: PyTree) -> bool:\n if hasattr(opt_state, 'mini_step'):\n return opt_state.mini_step == 0\n return True\n \n if target_base_params is not None:\n target_base_params = jax.lax.cond(\n update_cond(base_train_state.opt_state), \n update_targets, \n mid_targets, \n base_train_state.params, \n target_base_params, \n base_train_state.step, \n )\n q1_target_head_params = jax.lax.cond(\n update_cond(q1_head_train_state.opt_state), \n update_targets, \n mid_targets, \n q1_head_train_state.params, \n q1_target_head_params, \n q1_head_train_state.step, \n )\n q2_target_head_params = jax.lax.cond(\n update_cond(q2_head_train_state.opt_state), \n update_targets, \n mid_targets, \n q2_head_train_state.params, \n q2_target_head_params, \n q2_head_train_state.step, \n )\n\n return base_train_state, target_base_params, q1_head_train_state, q2_head_train_state, v_head_train_state, q1_target_head_params, q2_target_head_params, loss, info\n\n return cls(\n base_train_state=base_train_state, \n target_base_params=target_base_params, \n q1_head_train_state=q1_head_train_state, \n q2_head_train_state=q2_head_train_state, \n v_head_train_state=v_head_train_state, \n q1_target_head_params=q1_target_head_params, \n q2_target_head_params=q2_target_head_params, \n base_model=base_model, \n q_head_model=q_head_model, \n v_head_model=v_head_model, \n tokenizer=tokenizer, \n _step=_step, \n )" }, { "identifier": "GPT2ILQLInference", "path": "LLM_RL/algorithms/ilql/gpt2/interface.py", "snippet": "class GPT2ILQLInference(ILQLInference):\n @classmethod\n def load_inference(\n cls, \n value_inference: GPT2ValueRLInference, \n target_value_inference: GPT2ValueRLInference, \n loss_fn: Callable, \n use_target_base_for_loss: bool=True, \n ):\n mesh = value_inference.base_model.config.mesh\n assert mesh is not None\n assert mesh == value_inference.q_head_model.config.mesh\n assert mesh == value_inference.v_head_model.config.mesh\n assert mesh == target_value_inference.base_model.config.mesh\n assert mesh == target_value_inference.q_head_model.config.mesh\n\n base_params_partition_spec = match_partition_rules(value_inference.base_model.config.get_partition_rules(), value_inference.base_params)\n target_base_params_partition_spec = PS() if (not use_target_base_for_loss) else match_partition_rules(target_value_inference.base_model.config.get_partition_rules(), target_value_inference.base_params)\n q1_head_params_partition_spec = match_partition_rules(value_inference.q_head_model.config.get_partition_rules(), value_inference.q1_head_params)\n q2_head_params_partition_spec = match_partition_rules(value_inference.q_head_model.config.get_partition_rules(), value_inference.q2_head_params)\n v_head_params_partition_spec = match_partition_rules(value_inference.v_head_model.config.get_partition_rules(), value_inference.v_head_params)\n q1_target_head_params_partition_spec = match_partition_rules(target_value_inference.q_head_model.config.get_partition_rules(), target_value_inference.q1_head_params)\n q2_target_head_params_partition_spec = match_partition_rules(target_value_inference.q_head_model.config.get_partition_rules(), target_value_inference.q2_head_params)\n \n @partial(\n pjit, \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), 
base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), target_base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), v_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_target_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_target_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _eval_loss(\n base_params: PyTree, \n target_base_params: Optional[PyTree], \n q1_head_params: PyTree, \n q2_head_params: PyTree, \n v_head_params: PyTree, \n q1_target_head_params: PyTree, \n q2_target_head_params: PyTree, \n\n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n rewards: jax.Array, \n dones: jax.Array, \n\n next_token_ids: Optional[jax.Array], \n next_tokens_attention_mask: Optional[jax.Array], \n next_tokens_position_ids: Optional[jax.Array], \n next_dones: Optional[jax.Array], \n\n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n train: bool=False, \n ) -> Tuple[jax.Array, PyTree]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS(('dp', 'fsdp'), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS(('dp', 'fsdp'), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS(('dp', 'fsdp'), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS(('dp', 'fsdp'), None))\n rewards = with_named_sharding_constraint(rewards, mesh, PS(('dp', 'fsdp'), None))\n dones = with_named_sharding_constraint(dones, mesh, PS(('dp', 'fsdp')))\n if next_token_ids is not None:\n assert next_tokens_attention_mask is not None\n assert next_tokens_position_ids is not None\n next_token_ids = with_named_sharding_constraint(next_token_ids, mesh, PS(('dp', 'fsdp'), None))\n next_tokens_attention_mask = with_named_sharding_constraint(next_tokens_attention_mask, mesh, PS(('dp', 'fsdp'), None))\n next_tokens_position_ids = with_named_sharding_constraint(next_tokens_position_ids, mesh, PS(('dp', 'fsdp'), None))\n next_dones = with_named_sharding_constraint(next_dones, mesh, PS(('dp', 'fsdp')))\n else:\n assert next_tokens_attention_mask is None\n assert next_tokens_position_ids is None\n \n # get base hidden states\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_model_output = value_inference.base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n\n if target_base_params is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_base_model_output = target_value_inference.base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n 
position_ids=position_ids, \n params=target_base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n else:\n target_base_model_output = base_model_output\n \n if next_token_ids is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n next_token_base_model_output = value_inference.base_model(\n input_ids=next_token_ids, \n attention_mask=next_tokens_attention_mask, \n position_ids=next_tokens_position_ids, \n params=base_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n \n # get values\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q1_head_output = value_inference.q_head_model.apply(\n {'params': q1_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q2_head_output = value_inference.q_head_model.apply(\n {'params': q2_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n v_head_output = value_inference.v_head_model.apply(\n {'params': v_head_params}, \n base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_q1_head_output = target_value_inference.q_head_model.apply(\n {'params': q1_target_head_params}, \n target_base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n target_q2_head_output = target_value_inference.q_head_model.apply(\n {'params': q2_target_head_params}, \n target_base_model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n\n # process outputs\n\n q1 = jnp.take_along_axis(q1_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n q2 = jnp.take_along_axis(q2_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n v = v_head_output[:, :-1].squeeze(2)\n v_full = v_head_output.squeeze(2)\n target_q1 = jnp.take_along_axis(target_q1_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n target_q2 = jnp.take_along_axis(target_q2_head_output[:, :-1], input_ids[:, 1:][..., None], axis=2).squeeze(2)\n\n q1_logits = q1_head_output[:, :-1, :].astype(jnp.float32)\n q2_logits = q2_head_output[:, :-1, :].astype(jnp.float32)\n\n # get next token values\n\n if next_token_ids is not None:\n # just run vf on last token to save some flops\n last_next_token_idxs = (next_tokens_attention_mask.shape[1]-1)-jnp.argmax(jnp.flip(next_tokens_attention_mask, axis=1).astype(jnp.int32), axis=1)\n final_next_token_h = next_token_base_model_output.hidden_states[-1][jnp.arange(0, input_ids.shape[0], dtype=jnp.int32), last_next_token_idxs, :]\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n next_token_v_head_output = value_inference.v_head_model.apply(\n {'params': v_head_params}, \n final_next_token_h, \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n ).squeeze(1)\n 
v_final = next_token_v_head_output * (1 - next_dones.astype(jnp.float32))\n else:\n last_action_idxs = (should_take_action.shape[1]-1)-jnp.argmax(jnp.flip(should_take_action, axis=1).astype(jnp.int32), axis=1)+1\n last_token_idxs = (attention_mask.shape[1]-1)-jnp.argmax(jnp.flip(attention_mask, axis=1).astype(jnp.int32), axis=1)\n final_state_idxs = ((1 - dones) * last_action_idxs + dones * last_token_idxs).astype(jnp.int32)\n v_final = v_full[jnp.arange(0, should_take_action.shape[0], dtype=jnp.int32), final_state_idxs]\n v_final = v_final * (1 - dones)\n\n loss, info = loss_fn(\n q1, \n q2, \n v, \n v_final, \n target_q1, \n target_q2, \n q1_logits, \n q2_logits, \n input_ids[:, 1:], \n attention_mask[:, 1:], \n should_take_action, \n rewards, \n )\n \n return loss, info\n\n return cls(\n value_inference=value_inference, \n target_value_inference=target_value_inference, \n _eval_loss=_eval_loss, \n use_target_base_for_loss=use_target_base_for_loss, \n )" }, { "identifier": "GPT2ValuePolicy", "path": "LLM_RL/algorithms/value_rl_base/gpt2/interface.py", "snippet": "class GPT2ValuePolicy(ValueRLPolicy):\n def __init__(\n self, \n inference: ValueRLInference, \n prng_key: Optional[jax.random.KeyArray], \n generation_config: Optional[GenerationConfig]=None, \n blocking_strategy: BlockingStrategy=BlockingStrategy(padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=None), \n in_str_process: Optional[Callable[[str], str]]=None, \n out_str_process: Optional[Callable[[str], str]]=None, \n input_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n target_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n trace: bool=True, \n ):\n self.inference = inference\n self.prng_key = prng_key\n self.generation_config = generation_config\n self.blocking_strategy = blocking_strategy\n self.in_str_process = in_str_process\n self.out_str_process = out_str_process\n self.input_token_process = input_token_process\n self.target_token_process = target_token_process\n if self.in_str_process is None:\n self.in_str_process = lambda x: x\n if self.out_str_process is None:\n self.out_str_process = lambda x: x\n self.trace = trace\n \n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n if done is None:\n done = [False]*len(text_history)\n # force eos_token for done sequences\n eos_token = self.inference.tokenizer.eos_token\n if self.generation_config is not None and self.generation_config.eos_token_id is not None:\n eos_token = self.inference.tokenizer.decode(self.generation_config.eos_token_id)\n if eos_token is None:\n eos_token = self.inference.tokenizer.pad_token\n if eos_token is None:\n eos_token = ''\n \n raw_input_strs = [\n eos_token if d else self.in_str_process(text_history_to_str(item)) \\\n for item, d in zip(text_history, done)\n ]\n\n new_key = None\n if self.prng_key is not None:\n self.prng_key, new_key = jax.random.split(self.prng_key)\n model_outputs = self.inference.generate_from_str(\n input_strs=raw_input_strs, \n prng_key=new_key, \n blocking_strategy=self.blocking_strategy, \n generation_config=self.generation_config, \n input_token_process=self.input_token_process, \n target_token_process=self.target_token_process, \n trace=self.trace, \n )\n\n raw_output_strs = model_outputs.output_strs\n output_strs = [\n \"\" if d else self.out_str_process(strip_prompt_from_completion(raw_input_str, raw_output_str)) \\\n for raw_input_str, raw_output_str, d in zip(raw_input_strs, raw_output_strs, 
done)\n ]\n\n return [\n None if d else text_history_item+(Text(output_str, True),) \\\n for text_history_item, output_str, d in zip(text_history, output_strs, done)\n ]\n \n def set_params(self, policy_params: PyTree) -> None:\n pi_beta_params, base_params, \\\n q1_head_params, q2_head_params = policy_params\n self.inference = self.inference.replace(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q1_head_params, \n q2_head_params=q2_head_params, \n )" }, { "identifier": "GPT2ValueRLInference", "path": "LLM_RL/algorithms/value_rl_base/gpt2/interface.py", "snippet": "class GPT2ValueRLInference(ValueRLInference):\n @classmethod\n def load_inference(\n cls, \n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n v_head_params: Optional[PyTree], \n pi_beta_model: Optional[FlaxPreTrainedModel], \n base_model: FlaxPreTrainedModel, \n q_head_model: nn.Module, \n v_head_model: Optional[nn.Module], \n tokenizer: PreTrainedTokenizerBase, \n beta: float=0.0, \n dp_shard_logits: bool=True, \n ):\n mesh = base_model.config.mesh\n assert mesh is not None\n assert mesh == q_head_model.config.mesh\n if v_head_model is not None:\n assert mesh == v_head_model.config.mesh\n assert (pi_beta_model is None and pi_beta_params is None) or (pi_beta_model is not None and pi_beta_params is not None)\n \n pi_beta_params_partition_spec = PS() if pi_beta_params is None else match_partition_rules(pi_beta_model.config.get_partition_rules(), pi_beta_params)\n base_params_partition_spec = match_partition_rules(base_model.config.get_partition_rules(), base_params)\n q1_head_params_partition_spec = match_partition_rules(q_head_model.config.get_partition_rules(), q1_head_params)\n q2_head_params_partition_spec = PS() if q2_head_params is None else match_partition_rules(q_head_model.config.get_partition_rules(), q2_head_params)\n v_head_params_partition_spec = PS() if v_head_params is None else match_partition_rules(v_head_model.config.get_partition_rules(), v_head_params)\n\n generator = None\n if pi_beta_model is not None:\n generator = GPT2ValueRLGeneration(\n base_model_config=base_model.config, \n pi_beta=pi_beta_model, \n value_base=base_model, \n q_head=q_head_model, \n beta=beta, \n )\n\n if pi_beta_params is not None:\n @partial(\n pjit, \n static_argnames=('generation_config', 'trace'), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), pi_beta_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=NamedSharding(mesh, PS()), \n )\n def _generate(\n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n generation_config: Optional[FrozenDict]=None, \n trace: bool=True, \n ) -> Union[FlaxSampleOutput, FlaxGreedySearchOutput, FlaxBeamSearchOutput]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n attention_mask = 
with_named_sharding_constraint(attention_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n # NOTE: position_ids ignored by transformers\n\n # generate from model\n output = generator.generate(\n input_ids=input_ids, \n attention_mask=attention_mask, \n params=(pi_beta_params, base_params, q1_head_params, q2_head_params), \n prng_key=prng_key, \n generation_config=StreamingGenerationConfig.from_dict(generation_config) if generation_config is not None else None, \n trace=trace, \n )\n \n return output\n else:\n def _generate(\n pi_beta_params: Optional[PyTree], \n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n generation_config: Optional[FrozenDict]=None, \n trace: bool=True, \n ) -> Union[FlaxSampleOutput, FlaxGreedySearchOutput, FlaxBeamSearchOutput]:\n raise NotImplementedError\n \n @partial(\n pjit, \n static_argnames=('output_attentions', 'train'), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), base_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q1_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), q2_head_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), v_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=ValueRLForwardOutput(\n base_raw_output=FlaxCausalLMOutputWithCrossAttentions(\n logits=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if dp_shard_logits else NamedSharding(mesh, PS()), \n hidden_states=NamedSharding(mesh, PS()), # assume no sharding for hidden states\n attentions=NamedSharding(mesh, PS()), # assume no sharding for attentions\n cross_attentions=NamedSharding(mesh, PS()), # assume no sharding for cross attentions\n past_key_values=NamedSharding(mesh, PS()), # assume no sharding for past key values\n ), \n q1=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if dp_shard_logits else NamedSharding(mesh, PS()), \n q2=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if (dp_shard_logits and q2_head_params is not None) else NamedSharding(mesh, PS()), \n v=NamedSharding(mesh, PS()), \n ), \n )\n def _forward(\n base_params: PyTree, \n q1_head_params: PyTree, \n q2_head_params: Optional[PyTree], \n v_head_params: Optional[PyTree], \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n output_attentions: Optional[bool]=None, \n train: bool=False, \n ) -> ValueRLForwardOutput:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n\n # get logits\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n base_output = base_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=base_params, \n train=train, \n output_attentions=output_attentions, \n output_hidden_states=True, \n dropout_rng=new_key, \n 
)\n # trunc padded logits\n base_output = base_output.replace(logits=base_output.logits.at[:, :, base_model.config.unpadded_vocab_size:].set(-float('inf')))\n\n # get q1\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q1 = q_head_model.apply(\n {'params': q1_head_params}, \n base_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n # trunc padded qs\n q1 = q1.at[:, :, base_model.config.unpadded_vocab_size:].set(-float('inf'))\n\n # get q2\n if q2_head_params is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n q2 = q_head_model.apply(\n {'params': q2_head_params}, \n base_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n )\n # trunc padded qs\n q2 = q2.at[:, :, base_model.config.unpadded_vocab_size:].set(-float('inf'))\n else:\n q2 = None\n\n if v_head_params is not None:\n # get v\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n v = v_head_model.apply(\n {'params': v_head_params}, \n base_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if prng_key is not None else None, \n ).squeeze(2)\n else:\n v = None\n\n # assert sharding on outputs\n if dp_shard_logits:\n base_output = base_output.replace(logits=with_named_sharding_constraint(base_output.logits, mesh, PS((\"dp\", \"fsdp\"), None, None)))\n q1 = with_named_sharding_constraint(q1, mesh, PS((\"dp\", \"fsdp\"), None, None))\n if q2 is not None:\n q2 = with_named_sharding_constraint(q2, mesh, PS((\"dp\", \"fsdp\"), None, None))\n return ValueRLForwardOutput(\n base_raw_output=base_output, \n q1=q1, \n q2=q2, \n v=v, \n )\n\n return cls(\n pi_beta_params=pi_beta_params, \n base_params=base_params, \n q1_head_params=q1_head_params, \n q2_head_params=q2_head_params, \n v_head_params=v_head_params, \n pi_beta_model=pi_beta_model, \n base_model=base_model, \n q_head_model=q_head_model, \n v_head_model=v_head_model, \n tokenizer=tokenizer, \n _generate=_generate, \n _forward=_forward,\n )" }, { "identifier": "load_train_state_from_config", "path": "LLM_RL/heads/mlp_head.py", "snippet": "def load_train_state_from_config(\n model_config: MLPHeadConfig, \n model_dtype: Union[str, jnp.dtype], \n optim_getter: Callable[[PyTree], optax.GradientTransformation], \n mesh: Mesh, # should be shape (dp, mp)\n prng_key: jax.random.PRNGKeyArray, \n pad_to_output_dim: Optional[int]=None, \n params_dtype: Optional[Union[str, jnp.dtype]]=jnp.float32, \n) -> Tuple[TrainState, MLPHead]:\n \n model = MLPHead(model_config, dtype=model_dtype)\n model.config.mesh = mesh\n # shard params\n params = freeze(shard_params_from_config(model, prng_key, params_dtype=params_dtype))\n # pad outputs\n if pad_to_output_dim is not None:\n params = freeze(pad_outputs(unfreeze(params), model, pad_to_output_dim, dtype=params_dtype))\n # shard train_state\n train_state = shard_train_state_from_params(model, params, optim_getter(params))\n\n return train_state, model" }, { "identifier": "MLPHeadConfig", "path": "LLM_RL/heads/mlp_head.py", "snippet": "class MLPHeadConfig(HeadConfig):\n def __init__(\n self, \n input_dim: int, \n hidden_dim: int, \n output_dim: int, \n use_bias: bool=True, \n unpadded_output_dim: Optional[int]=None, \n layer1_initializer_range: Optional[int]=None, \n layer1_bias_init: Optional[float]=None, \n layer2_initializer_range: Optional[int]=None, \n layer2_bias_init: 
Optional[float]=None, \n mesh: Optional[jax.sharding.Mesh]=None, \n ) -> None:\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.output_dim = output_dim\n self.use_bias = use_bias\n self.layer1_initializer_range = layer1_initializer_range\n self.layer1_bias_init = layer1_bias_init\n self.layer2_initializer_range = layer2_initializer_range\n self.layer2_bias_init = layer2_bias_init\n self.mesh = mesh\n self.unpadded_output_dim = unpadded_output_dim\n if self.unpadded_output_dim is None:\n self.unpadded_output_dim = self.output_dim\n super().__init__()\n \n @staticmethod\n def get_partition_rules():\n return [\n (re.escape(\"['dense1']['kernel']\"), PS(\"fsdp\", \"mp\")), \n (re.escape(\"['dense1']['bias']\"), PS(\"mp\")), \n (re.escape(\"['dense2']['kernel']\"), PS(\"mp\", \"fsdp\")), \n (re.escape(\"['dense2']['bias']\"), PS()), \n ]\n\n def to_dict(self) -> Dict[str, Any]:\n if self.mesh is None:\n return super().to_dict()\n else:\n new_conf = MLPHeadConfig(**self.__dict__)\n new_conf.mesh = None\n return new_conf.to_dict()" }, { "identifier": "ILQLDataset", "path": "LLM_RL/algorithms/ilql/data.py", "snippet": "class ILQLDataset(Dataset):\n def __init__(\n self, \n input_ids: np.ndarray, # [b, t]\n should_take_action: np.ndarray, # [b, t-1]\n rewards: np.ndarray, # [b, t-1]\n dones: np.ndarray, # [b]\n next_token_ids: Optional[np.ndarray], # [b, t']\n next_dones: Optional[np.ndarray], # [b]\n ):\n assert input_ids.shape[1] == (should_take_action.shape[1]+1)\n assert input_ids.shape[1] == (rewards.shape[1]+1)\n\n assert input_ids.shape[0] == should_take_action.shape[0]\n assert input_ids.shape[0] == rewards.shape[0]\n assert input_ids.shape[0] == dones.shape[0]\n if next_token_ids is not None:\n assert input_ids.shape[0] == next_token_ids.shape[0]\n if next_dones is not None:\n assert input_ids.shape[0] == next_dones.shape[0]\n\n self.input_ids = input_ids\n self.should_take_action = should_take_action\n self.rewards = rewards\n self.dones = dones\n self.next_token_ids = next_token_ids\n self.next_dones = next_dones\n \n def __getitem__(self, index):\n return {\n 'input_ids': jnp.asarray(self.input_ids[index], dtype=jnp.int32), \n 'should_take_action': jnp.asarray(self.should_take_action[index], dtype=jnp.bool_), \n 'rewards': jnp.asarray(self.rewards[index], dtype=jnp.float32), \n 'dones': jnp.asarray(self.dones[index], dtype=jnp.float32), \n 'next_token_ids': jnp.asarray(self.next_token_ids[index], dtype=jnp.float32) if self.next_token_ids is not None else None, \n 'next_dones': jnp.asarray(self.next_dones[index], dtype=jnp.float32) if self.next_dones is not None else None, \n }\n \n def __len__(self):\n return self.input_ids.shape[0]\n \n @classmethod\n def from_ilql_data_list(\n cls, \n ilql_data_list: List[ILQLData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> ILQLDataset:\n \n data = ILQLData.block(ilql_data_list, blocking_strategy, tokenizer)\n\n return cls(**data)" }, { "identifier": "ILQLIterableDataset", "path": "LLM_RL/algorithms/ilql/data.py", "snippet": "class ILQLIterableDataset(IterableDataset):\n def __init__(self, ilql_data: Iterable[Dict[str, np.ndarray]]):\n self.ilql_data = ilql_data\n \n def __iter__(self):\n return _ILQLIteratorDataset(iter(self.ilql_data))\n \n @classmethod\n def from_ilql_data_iterable(\n cls, \n ilql_data: Iterable[ILQLData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> ILQLIterableDataset:\n \n class _TokensIterable(Iterable):\n def 
_tokens_generator(self):\n for item in ilql_data:\n yield jax.tree_util.tree_map(lambda x: x[0], ILQLData.block([item], blocking_strategy, tokenizer))\n\n def __iter__(self):\n return self._tokens_generator()\n\n return cls(_TokensIterable())" }, { "identifier": "eval_loss", "path": "LLM_RL/algorithms/ilql/train.py", "snippet": "def eval_loss(\n inference: ILQLInference, \n dataset: Union[Seq2SeqDataset, Seq2SeqIterableDataset], \n prng_key: Optional[KeyArray], \n bsize: int, \n eval_batches: Optional[int], \n) -> Dict[str, Any]:\n # setup evaluator loop state\n eval_logs = []\n\n # eval on batches\n prng_key, new_prng = jax.random.split(prng_key) if prng_key is not None else (None, None)\n d = dataloader(new_prng, dataset, bsize, truncate=True)\n for i, batch in tqdm(enumerate(d)):\n # conditionally terminate early\n if eval_batches is not None and i >= eval_batches:\n break\n\n # get eval logs\n _, info = inference.eval_loss(**batch)\n eval_logs.append(info)\n \n # gather and postproc eval logs\n eval_logs = pull_logs(combine_logs(eval_logs))\n return eval_logs" }, { "identifier": "train_loop", "path": "LLM_RL/algorithms/ilql/train.py", "snippet": "def train_loop(\n trainer: ILQLTrain, \n inference: Union[ValueRLInference, ILQLInference], \n evaluator: Optional[Callable[[Inference], Tuple[float, Dict[str, Any]]]], \n dataset: Union[Seq2SeqDataset, Seq2SeqIterableDataset], \n prng_key: KeyArray, \n save_dir: Optional[str], \n epochs: int, \n max_steps: Optional[int], \n bsize: int, \n log_every: int, \n eval_every_steps: Optional[int], \n eval_every_epochs: Optional[int], \n eval_at_beginning: bool, \n eval_at_end: bool, \n save_every_steps: Optional[int], \n save_every_epochs: Optional[int], \n save_at_beginning: bool, \n save_at_end: bool, \n save_best: bool, \n max_checkpoints: Optional[int], \n save_train_state: bool, \n save_dtype: jnp.dtype, \n use_wandb: bool, \n wandb_project: Optional[str], \n wandb_run_name: Optional[str], \n wandb_config: Optional[Dict[str, Any]], \n is_main_process: Optional[bool]=None, \n **loop_state: Dict[Hashable, Any], \n) -> Tuple[Train, Inference]:\n assert (not use_wandb) or (use_wandb and wandb_project is not None)\n if is_main_process is None:\n is_main_process = jax.process_index() == 0\n \n # initalize wandb\n wandb_id = loop_state.get('wandb_id', None)\n if use_wandb and is_main_process:\n if wandb_id is None:\n wandb_id = wandb.util.generate_id()\n wandb.init(\n project=wandb_project, \n id=wandb_id, \n name=wandb_run_name, \n config=wandb_config, \n reinit=True, \n resume=\"allow\", \n )\n\n # initalize training loop state\n train_logs = []\n best_perf = loop_state.get('best_perf', float('inf'))\n saved_checkpoints = loop_state.get('saved_checkpoints', deque([]))\n step = 0\n steps_per_epoch = len(dataset) // bsize if isinstance(dataset, Dataset) else None\n if 'steps_per_epoch' in loop_state:\n assert steps_per_epoch == loop_state['steps_per_epoch'], 'loop_state steps_per_epoch does not match dataset steps_per_epoch'\n epoch = -1\n\n def _save(\n name: str, \n add_to_queue: bool, \n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal saved_checkpoints\n print(f'saving checkpoint {name} ...')\n # conditionally delete old checkpoints\n if add_to_queue and is_main_process:\n if (max_checkpoints is not None) and (len(saved_checkpoints) >= max_checkpoints):\n delete(saved_checkpoints.popleft(), recursive=True)\n curr_save_dir = os.path.join(save_dir, name)\n if is_main_process:\n create_path(curr_save_dir)\n dump_state(\n 
base_model=trainer.base_model, \n q_head_model=trainer.q_head_model, \n v_head_model=trainer.v_head_model, \n base_train_state=trainer.base_train_state, \n target_base_params=trainer.target_base_params, \n q1_head_train_state=trainer.q1_head_train_state, \n q2_head_train_state=trainer.q2_head_train_state, \n v_head_train_state=trainer.v_head_train_state, \n q1_target_head_params=trainer.q1_target_head_params, \n q2_target_head_params=trainer.q2_target_head_params, \n save_dir=curr_save_dir, \n save_train_state=save_train_state, \n enable_save=is_main_process, \n save_dtype=save_dtype, \n **loop_state, \n )\n if add_to_queue and is_main_process:\n saved_checkpoints.append(curr_save_dir)\n print('saved.')\n \n def _inference_update():\n nonlocal inference\n if isinstance(inference, ValueRLInference):\n inference = inference.replace(\n base_params=trainer.base_train_state.params, \n q1_head_params=trainer.q1_head_train_state.params, \n q2_head_params=trainer.q2_head_train_state.params, \n v_head_params=trainer.v_head_train_state.params, \n )\n elif isinstance(inference, ILQLInference):\n new_value_inference = inference.value_inference.replace(\n base_params=trainer.base_train_state.params, \n q1_head_params=trainer.q1_head_train_state.params, \n q2_head_params=trainer.q2_head_train_state.params, \n v_head_params=trainer.v_head_train_state.params, \n )\n new_target_value_inference = inference.target_value_inference.replace(\n base_params=trainer.target_base_params, \n q1_head_params=trainer.q1_target_head_params, \n q2_head_params=trainer.q2_target_head_params, \n )\n inference = inference.replace(\n value_inference=new_value_inference, \n target_value_inference=new_target_value_inference, \n )\n else:\n raise NotImplementedError\n \n def _eval(\n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal best_perf\n # get eval logs\n _inference_update()\n eval_perf, eval_logs = evaluator(inference)\n\n # publish eval logs\n eval_logs = pull_logs(label_logs(eval_logs, 'eval', {'step': step+1, 'epoch': epoch}))\n log(eval_logs, use_wandb and is_main_process)\n\n # conditionally save best model and optimizer state\n if save_dir is not None and save_best and eval_perf < best_perf:\n print('new best model!')\n best_perf = eval_perf\n _save(\n name='best', \n add_to_queue=False, \n **{**loop_state, 'best_perf': best_perf}, \n )\n \n # begin evaluation\n if evaluator is not None and eval_at_beginning:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save initial checkpoint\n if save_dir is not None and save_at_beginning:\n _save(\n name='initial', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # begin training loop\n for epoch in tqdm(range(epochs)):\n prng_key, new_prng = jax.random.split(prng_key)\n d = dataloader(new_prng, dataset, bsize, truncate=True)\n for batch in tqdm(d, total=steps_per_epoch):\n \n # step model and get training logs\n prng_key, new_prng = jax.random.split(prng_key)\n if 'step' in loop_state and step < loop_state['step']:\n step += 1\n continue\n trainer, _, info = trainer.step(\n **batch, \n prng_key=new_prng, \n train=True, \n )\n train_logs.append(info)\n \n # publish training logs and clear logs\n if (step + 1) % log_every == 0:\n logs = combine_logs(train_logs)\n logs = 
pull_logs(label_logs(logs, 'train', {'step': step+1, 'epoch': epoch}))\n log(logs, use_wandb and is_main_process)\n train_logs = []\n \n # begin evaluation\n if evaluator is not None and eval_every_steps is not None and (step + 1) % eval_every_steps == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_steps is not None and (step + 1) % save_every_steps == 0:\n _save(\n name=f'step_{step+1}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n step += 1\n\n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_every_epochs is not None and (epoch + 1) % eval_every_epochs == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_epochs is not None and (epoch + 1) % save_every_epochs == 0:\n _save(\n name=f'epoch_{epoch}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_at_end:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save final checkpoint\n if save_dir is not None and save_at_end:\n _save(\n name='last', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n # stop wandb\n if use_wandb and is_main_process:\n wandb.finish()\n _inference_update()\n return trainer, inference" }, { "identifier": "ILQLData", "path": "LLM_RL/algorithms/ilql/data.py", "snippet": "class ILQLData(NamedTuple):\n input_ids: np.ndarray # [t]\n should_take_action: np.ndarray # [t-1]\n rewards: np.ndarray # [t-1]\n done: np.ndarray # []\n next_token_ids: Optional[np.ndarray] # [t']\n next_done: Optional[np.ndarray] # []\n\n @staticmethod\n def block(\n data: List[ILQLData], \n blocking_strategy: BlockingStrategy, \n tokenizer: PreTrainedTokenizerBase, \n ) -> Dict[str, np.ndarray]:\n has_next_token = any(map(lambda x: x.next_token_ids is not None, data))\n assert all(map(lambda x: x.next_token_ids is None, data)) or has_next_token\n assert all(map(lambda x: x.next_done is None, data)) or has_next_token\n\n return dict(\n input_ids=block_sequences(\n list(map(lambda x: x.input_ids, data)), \n tokenizer.pad_token_id, \n dtype=np.int32, \n blocking_strategy=blocking_strategy, \n ), \n should_take_action=block_sequences(\n list(map(lambda x: x.should_take_action, data)), \n False, \n dtype=np.bool_, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n rewards=block_sequences(\n list(map(lambda x: x.rewards, 
data)), \n 0.0, \n dtype=np.float32, \n blocking_strategy=blocking_strategy._replace(max_length=blocking_strategy.max_length-1), \n ), \n dones=np.asarray(list(map(lambda x: x.done, data)), dtype=np.bool_), \n next_token_ids=block_sequences(\n list(map(lambda x: x.next_token_ids, data)), \n tokenizer.pad_token_id, \n dtype=np.int32, \n blocking_strategy=blocking_strategy, \n ) if has_next_token else None, \n next_dones=np.asarray(list(map(lambda x: x.next_done, data)), dtype=np.bool_) if has_next_token else None, \n )\n \n @classmethod\n def from_token_trajectory_chain(\n cls, \n token_trajectory_chain: TokenTrajectoryChain, \n ):\n if token_trajectory_chain.next is not None:\n if token_trajectory_chain.next.token_trajectory.is_action[1:].sum() > 0:\n first_next_action = np.argmax(token_trajectory_chain.next.token_trajectory.is_action[1:], axis=0)+1\n next_token_ids = token_trajectory_chain.next.token_trajectory.tokens[:first_next_action]\n next_done = False\n else:\n next_token_ids = token_trajectory_chain.next.token_trajectory.tokens\n next_done = token_trajectory_chain.next.token_trajectory.done\n else:\n next_token_ids, next_done = None, None\n return cls(\n input_ids=token_trajectory_chain.token_trajectory.tokens, \n should_take_action=token_trajectory_chain.token_trajectory.is_action[1:], \n rewards=token_trajectory_chain.token_trajectory.reward[1:], \n done=token_trajectory_chain.token_trajectory.done, \n next_token_ids=next_token_ids, \n next_done=next_done, \n )" }, { "identifier": "ILQLDataset", "path": "LLM_RL/algorithms/ilql/data.py", "snippet": "class ILQLDataset(Dataset):\n def __init__(\n self, \n input_ids: np.ndarray, # [b, t]\n should_take_action: np.ndarray, # [b, t-1]\n rewards: np.ndarray, # [b, t-1]\n dones: np.ndarray, # [b]\n next_token_ids: Optional[np.ndarray], # [b, t']\n next_dones: Optional[np.ndarray], # [b]\n ):\n assert input_ids.shape[1] == (should_take_action.shape[1]+1)\n assert input_ids.shape[1] == (rewards.shape[1]+1)\n\n assert input_ids.shape[0] == should_take_action.shape[0]\n assert input_ids.shape[0] == rewards.shape[0]\n assert input_ids.shape[0] == dones.shape[0]\n if next_token_ids is not None:\n assert input_ids.shape[0] == next_token_ids.shape[0]\n if next_dones is not None:\n assert input_ids.shape[0] == next_dones.shape[0]\n\n self.input_ids = input_ids\n self.should_take_action = should_take_action\n self.rewards = rewards\n self.dones = dones\n self.next_token_ids = next_token_ids\n self.next_dones = next_dones\n \n def __getitem__(self, index):\n return {\n 'input_ids': jnp.asarray(self.input_ids[index], dtype=jnp.int32), \n 'should_take_action': jnp.asarray(self.should_take_action[index], dtype=jnp.bool_), \n 'rewards': jnp.asarray(self.rewards[index], dtype=jnp.float32), \n 'dones': jnp.asarray(self.dones[index], dtype=jnp.float32), \n 'next_token_ids': jnp.asarray(self.next_token_ids[index], dtype=jnp.float32) if self.next_token_ids is not None else None, \n 'next_dones': jnp.asarray(self.next_dones[index], dtype=jnp.float32) if self.next_dones is not None else None, \n }\n \n def __len__(self):\n return self.input_ids.shape[0]\n \n @classmethod\n def from_ilql_data_list(\n cls, \n ilql_data_list: List[ILQLData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> ILQLDataset:\n \n data = ILQLData.block(ilql_data_list, blocking_strategy, tokenizer)\n\n return cls(**data)" }, { "identifier": "FenChessHistoryEnv", "path": "llm_rl_scripts/chess/env/env.py", "snippet": "class 
FenChessHistoryEnv(TextEnv):\n def __init__(self, max_moves=400, from_position=None, random_opponent=False):\n super().__init__()\n self.chess_env = ChessEnv(fen=True, from_position=from_position, random_opponent=random_opponent)\n self.from_position = from_position\n self.max_moves = max_moves\n self.from_position = from_position\n # self.initial_history = initial_history\n\n def reset(self, seed: Optional[int] = None, options: Optional[Dict] = None):\n self.init_state, _ = self.chess_env.reset()\n self.num_moves_made = 0\n return (Text(preprocess_state_og(self.init_state), False),)\n\n def step(self, text_history: TextHistory):\n assert text_history[-1].is_action\n action = text_history[-1].text\n action = postprocess_move(action)\n st, reward, done, opp_mv = self.chess_env.step(action) \n new_state = Text(preprocess_state_og(st), False)\n self.num_moves_made += 1\n if self.num_moves_made > self.max_moves:\n done = 1\n return (new_state,), reward, done\n \n def copy(self):\n return FenChessHistoryEnv( self.max_moves, self.from_position)" } ]
from typing import Optional from JaxSeq.bucket_manager import open_with_bucket as open from JaxSeq.utils import convert_path, load_mesh, setup_experiment_save from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask from JaxSeq.models.gpt2.load import load_train_state, ModelLoadMode from LLM_RL.algorithms.ilql.base_interface import ilql_loss from transformers.generation import GenerationConfig from jaxtyping import PyTree from LLM_RL.environment import Text, text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectoryChain from LLM_RL.algorithms.ilql.gpt2.interface import GPT2ILQLTrain, GPT2ILQLInference from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy, GPT2ValueRLInference from LLM_RL.heads.mlp_head import load_train_state_from_config as load_head_train_state_from_config from LLM_RL.heads.mlp_head import MLPHeadConfig from JaxSeq.shard_model import shard_params_from_params from LLM_RL.algorithms.ilql.data import ILQLDataset from LLM_RL.algorithms.ilql.data import ILQLIterableDataset from functools import partial from JaxSeq.logs import pull_logs from LLM_RL.algorithms.ilql.train import eval_loss, train_loop from LLM_RL.algorithms.ilql.data import ILQLData, ILQLDataset from JaxSeq.utils import multihost_device_get from transformers import GPT2TokenizerFast from IPython import embed from llm_rl_scripts.chess.env.env import FenChessHistoryEnv import tyro import jax import jax.numpy as jnp import os import optax import pickle as pkl import re import json import random
21,116
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=True, wandb_project: Optional[str]="llm_rl_repo_endgames_ilql", n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, tau: float=0.7, cql_weight: float=1.0, gamma: float=0.99, train_bsize: int=32, grad_accum_steps: int=1, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=160, log_every: int=256, eval_every_steps: Optional[int]=None, eval_every_epochs: Optional[int]=10, eval_at_beginning: bool=False, eval_at_end: bool=True, save_every_steps: Optional[int]=None, save_every_epochs: Optional[int]=5, save_at_beginning: bool=False, save_at_end: bool=True, save_best: bool=False, max_checkpoints: Optional[int]=5, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=256, policy_max_output_length: int=256, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, reranker: bool=True ): input_args = locals() print(input_args) tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def ilql_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)], [0, obj[-1]["reward"]], True) curr_chain = TextTrajectoryChain(text_trajectory=last_trajectory, next=None) # curr_chain.next = curr_chain for traj in reversed(obj): # iterate through move history backwards except for last transition # embed() prev_trajectory = TextTrajectory([Text(traj["state"], False), Text(traj["action"], True)], [0, traj["reward"]], False) curr_chain = TextTrajectoryChain(text_trajectory=prev_trajectory, next=curr_chain)
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=True, wandb_project: Optional[str]="llm_rl_repo_endgames_ilql", n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, tau: float=0.7, cql_weight: float=1.0, gamma: float=0.99, train_bsize: int=32, grad_accum_steps: int=1, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=160, log_every: int=256, eval_every_steps: Optional[int]=None, eval_every_epochs: Optional[int]=10, eval_at_beginning: bool=False, eval_at_end: bool=True, save_every_steps: Optional[int]=None, save_every_epochs: Optional[int]=5, save_at_beginning: bool=False, save_at_end: bool=True, save_best: bool=False, max_checkpoints: Optional[int]=5, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=256, policy_max_output_length: int=256, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, reranker: bool=True ): input_args = locals() print(input_args) tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def ilql_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)], [0, obj[-1]["reward"]], True) curr_chain = TextTrajectoryChain(text_trajectory=last_trajectory, next=None) # curr_chain.next = curr_chain for traj in reversed(obj): # iterate through move history backwards except for last transition # embed() prev_trajectory = TextTrajectory([Text(traj["state"], False), Text(traj["action"], True)], [0, traj["reward"]], False) curr_chain = TextTrajectoryChain(text_trajectory=prev_trajectory, next=curr_chain)
token_trajectory_chain = TokenTrajectoryChain.from_text_trajectory_chain(curr_chain, tokenizer)
5
2023-11-21 00:16:42+00:00
24k